CACAO
codegen.cpp
Go to the documentation of this file.
1 /* src/vm/jit/aarch64/codegen.cpp - machine code generator for Aarch64
2 
3  Copyright (C) 1996-2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
25 
26 #include "config.h"
27 
28 #include <cassert>
29 #include <cstdio>
30 #include <cmath>
31 
32 #include "vm/types.hpp"
33 
34 #include "md.hpp"
35 #include "md-abi.hpp"
36 
37 #include "vm/jit/aarch64/arch.hpp"
40 
41 #include "mm/memory.hpp"
42 
43 #include "native/localref.hpp"
44 #include "native/native.hpp"
45 
46 #include "threads/lock.hpp"
47 
48 #include "vm/descriptor.hpp"
49 #include "vm/exceptions.hpp"
50 #include "vm/field.hpp"
51 #include "vm/global.hpp"
52 #include "vm/loader.hpp"
53 #include "vm/options.hpp"
54 #include "vm/vm.hpp"
55 
56 #include "vm/jit/abi.hpp"
57 #include "vm/jit/asmpart.hpp"
58 #include "vm/jit/builtin.hpp"
60 #include "vm/jit/dseg.hpp"
61 #include "vm/jit/emit-common.hpp"
62 #include "vm/jit/jit.hpp"
64 #include "vm/jit/parse.hpp"
65 #include "vm/jit/reg.hpp"
66 #include "vm/jit/stacktrace.hpp"
67 #include "vm/jit/trap.hpp"
68 
69 
70 /**
71  * Generates machine code for the method prolog.
72  */
74 {
75  varinfo* var;
76  methoddesc* md;
77  int32_t s1;
78  int32_t p, t, l;
79  int32_t varindex;
80  int i;
81 
82  // Get required compiler data.
83  methodinfo* m = jd->m;
84  codeinfo* code = jd->code;
85  codegendata* cd = jd->cd;
86  registerdata* rd = jd->rd;
87  AsmEmitter asme(cd);
88 
89  /* create stack frame (if necessary) */
90  /* NOTE: SP on aarch64 has to be quad word aligned */
91  int offset = cd->stackframesize * 8;
92  offset += (offset % 16);
93  if (cd->stackframesize) {
94  asme.lda(REG_SP, REG_SP, -offset);
95  }
96 
97  /* save return address and used callee saved registers */
98 
99  p = cd->stackframesize;
100  if (!code_is_leafmethod(code)) {
101  p--; asme.lst(REG_RA, REG_SP, offset - 8);
102  }
103  for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
104  p--; asme.lst(rd->savintregs[i], REG_SP, p * 8);
105  }
106  for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
107  p--; asme.dst(rd->savfltregs[i], REG_SP, p * 8);
108  }
109 
110  /* take arguments out of register or stack frame */
111 
112  md = m->parseddesc;
113 
114  for (p = 0, l = 0; p < md->paramcount; p++) {
115  t = md->paramtypes[p].type;
116 
117  varindex = jd->local_map[l * 5 + t];
118 
119  l++;
120  if (IS_2_WORD_TYPE(t)) /* increment local counter for 2 word types */
121  l++;
122 
123  if (varindex == jitdata::UNUSED)
124  continue;
125 
126  var = VAR(varindex);
127 
128  s1 = md->params[p].regoff;
129 
130  if (IS_INT_LNG_TYPE(t)) { /* integer args */
131  if (!md->params[p].inmemory) { /* register arguments */
132  if (!IS_INMEMORY(var->flags))
133  asme.mov(var->vv.regoff, s1);
134  else
135  asme.lst(s1, REG_SP, var->vv.regoff);
136  }
137  else { /* stack arguments */
138  if (!IS_INMEMORY(var->flags))
139  asme.lld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
140  else
141  var->vv.regoff = cd->stackframesize * 8 + s1;
142  }
143  }
144  else { /* floating args */
145  if (!md->params[p].inmemory) { /* register arguments */
146  if (!IS_INMEMORY(var->flags))
147  if (IS_2_WORD_TYPE(t))
148  asme.dmov(var->vv.regoff, s1);
149  else
150  asme.fmov(var->vv.regoff, s1);
151  else
152  if (IS_2_WORD_TYPE(t))
153  asme.dst(s1, REG_SP, var->vv.regoff);
154  else
155  asme.fst(s1, REG_SP, var->vv.regoff);
156  }
157  else { /* stack arguments */
158  if (!(var->flags & INMEMORY))
159  if (IS_2_WORD_TYPE(t))
160  asme.dld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
161  else
162  asme.fld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
163  else
164  var->vv.regoff = cd->stackframesize * 8 + s1;
165  }
166  }
167  }
168 }
169 
170 
171 /**
172  * Generates machine code for the method epilog.
173  */
175 {
176  int32_t p;
177  int i;
178 
179  // Get required compiler data.
180  codeinfo* code = jd->code;
181  codegendata* cd = jd->cd;
182  registerdata* rd = jd->rd;
183  AsmEmitter asme(cd);
184 
185  int offset = cd->stackframesize * 8;
186  offset += (offset % 16);
187 
188  p = cd->stackframesize;
189 
190  /* restore return address */
191 
192  if (!code_is_leafmethod(code)) {
193  p--; asme.lld(REG_RA, REG_SP, offset - 8);
194  }
195 
196  /* restore saved registers */
197 
198  for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
199  p--; asme.lld(rd->savintregs[i], REG_SP, p * 8);
200  }
201  for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
202  p--; asme.dld(rd->savfltregs[i], REG_SP, p * 8);
203  }
204 
205  /* deallocate stack */
206 
207  if (cd->stackframesize) {
208  asme.lda(REG_SP, REG_SP, offset);
209  }
210 
211  asme.ret();
212 }
213 
214 
215 /**
216  * Generates machine code for one ICMD.
217  */
219 {
220  varinfo* var;
221  builtintable_entry* bte;
222  methodinfo* lm; // Local methodinfo for ICMD_INVOKE*.
223  unresolved_method* um;
224  fieldinfo* fi;
225  unresolved_field* uf;
226  int32_t fieldtype;
227  int32_t s1, s2, s3, d = 0;
228  int32_t disp;
229 
230  // Get required compiler data.
231  codegendata* cd = jd->cd;
232 
233  AsmEmitter asme(cd);
234 
235  switch (iptr->opc) {
236 
237  /* constant operations ************************************************/
238 
239  case ICMD_ACONST: /* ... ==> ..., constant */
240  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
241 
242  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
243  constant_classref *cr = iptr->sx.val.c.ref;
244 
245  disp = dseg_add_unique_address(cd, cr);
246 
247  /* XXX Only add the patcher, if this position needs to
248  be patched. If there was a previous position which
249  resolved the same class, the returned displacement
250  of dseg_add_address is ok to use. */
251 
253  cr, disp);
254 
255  asme.ald(d, REG_PV, disp);
256  }
257  else {
258  if (iptr->sx.val.anyptr == NULL) {
259  asme.lconst(d, 0);
260  }
261  else {
262  disp = dseg_add_address(cd, iptr->sx.val.anyptr);
263  asme.ald(d, REG_PV, disp);
264  }
265  }
266  emit_store_dst(jd, iptr, d);
267  break;
268 
269  case ICMD_FCONST: /* ... ==> ..., constant */
270 
271  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
272  disp = dseg_add_float(cd, iptr->sx.val.f);
273  asme.fld(d, REG_PV, disp);
274  emit_store_dst(jd, iptr, d);
275  break;
276 
277  case ICMD_DCONST: /* ... ==> ..., constant */
278 
279  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
280  disp = dseg_add_double(cd, iptr->sx.val.d);
281  asme.dld(d, REG_PV, disp);
282  emit_store_dst(jd, iptr, d);
283  break;
284 
285  /* integer operations *************************************************/
286 
287  case ICMD_INEG: /* ..., value ==> ..., - value */
288 
289  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
290  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
291  asme.isub(d, REG_ZERO, s1);
292  emit_store_dst(jd, iptr, d);
293  break;
294 
295  case ICMD_LNEG: /* ..., value ==> ..., - value */
296 
297  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
298  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
299  asme.lsub(d, REG_ZERO, s1);
300  emit_store_dst(jd, iptr, d);
301  break;
302 
303  case ICMD_I2L: /* ..., value ==> ..., value */
304 
305  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
306  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
307  asme.sxtw(d, s1);
308  emit_store_dst(jd, iptr, d);
309  break;
310 
311  case ICMD_L2I: /* ..., value ==> ..., value */
312 
313  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
314  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
315  asme.ubfx(d, s1);
316  emit_store_dst(jd, iptr, d);
317  break;
318 
319  case ICMD_INT2BYTE: /* ..., value ==> ..., value */
320 
321  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
322  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
323  asme.sxtb(d, s1);
324  emit_store_dst(jd, iptr, d);
325  break;
326 
327  case ICMD_INT2CHAR: /* ..., value ==> ..., value */
328 
329  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
330  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
331  asme.uxth(d, s1);
332  emit_store_dst(jd, iptr, d);
333  break;
334 
335  case ICMD_INT2SHORT: /* ..., value ==> ..., value */
336 
337  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
338  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
339  asme.sxth(d, s1);
340  emit_store_dst(jd, iptr, d);
341  break;
342 
343  case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
344 
345  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
346  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
347  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
348  asme.iadd(d, s1, s2);
349  emit_store_dst(jd, iptr, d);
350  break;
351 
352  case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
353 
354  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
355  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
356  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
357  asme.ladd(d, s1, s2);
358  emit_store_dst(jd, iptr, d);
359  break;
360 
361  case ICMD_IINC:
362  case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
363  /* sx.val.i = constant */
364 
365  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
366  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
367 
368  if ((iptr->sx.val.i >= 0) && (iptr->sx.val.i <= 0xffffff)) {
369  asme.iadd_imm(d, s1, iptr->sx.val.i);
370  } else if ((-iptr->sx.val.i >= 0) && (-iptr->sx.val.i <= 0xffffff)) {
371  asme.isub_imm(d, s1, -iptr->sx.val.i);
372  } else {
373  asme.iconst(REG_ITMP2, iptr->sx.val.i);
374  asme.iadd(d, s1, REG_ITMP2);
375  }
376 
377  emit_store_dst(jd, iptr, d);
378  break;
379 
380  case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
381  /* sx.val.l = constant */
382 
383  // assert(iptr->sx.val.l >= 0); // TODO: check why this was here
384 
385  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
386  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
387  if ((iptr->sx.val.l >= 0) && (iptr->sx.val.l <= 0xffffff)) {
388  asme.ladd_imm(d, s1, iptr->sx.val.l);
389  } else if ((-iptr->sx.val.l >= 0) && (-iptr->sx.val.l <= 0xffffff)) {
390  asme.lsub_imm(d, s1, -iptr->sx.val.l);
391  } else {
392  asme.lconst(REG_ITMP2, iptr->sx.val.l);
393  asme.ladd(d, s1, REG_ITMP2);
394  }
395  emit_store_dst(jd, iptr, d);
396  break;
397 
398  case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
399 
400  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
401  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
402  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
403  asme.isub(d, s1, s2);
404  emit_store_dst(jd, iptr, d);
405  break;
406 
407  case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
408 
409  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
410  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
411  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
412  asme.lsub(d, s1, s2);
413  emit_store_dst(jd, iptr, d);
414  break;
415 
416  case ICMD_ISUBCONST: /* ..., value ==> ..., value - constant */
417  /* sx.val.i = constant */
418 
419  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
420  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
421 
422  if ((iptr->sx.val.i >= 0) && (iptr->sx.val.i <= 0xffffff)) {
423  asme.isub_imm(d, s1, iptr->sx.val.i);
424  } else if ((-iptr->sx.val.i >= 0) && (-iptr->sx.val.i <= 0xffffff)) {
425  asme.iadd_imm(d, s1, -iptr->sx.val.i);
426  } else {
427  asme.iconst(REG_ITMP2, iptr->sx.val.i);
428  asme.isub(d, s1, REG_ITMP2);
429  }
430 
431  emit_store_dst(jd, iptr, d);
432  break;
433 
434  case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
435  /* sx.val.l = constant */
436 
437  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
438  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
439 
440  if ((iptr->sx.val.l >= 0) && (iptr->sx.val.l <= 0xffffff)) {
441  asme.lsub_imm(d, s1, iptr->sx.val.l);
442  } else if ((-iptr->sx.val.l >= 0) && (-iptr->sx.val.l <= 0xffffff)) {
443  asme.ladd_imm(d, s1, -iptr->sx.val.l);
444  } else {
445  asme.lconst(REG_ITMP2, iptr->sx.val.l);
446  asme.lsub(d, s1, REG_ITMP2);
447  }
448 
449  emit_store_dst(jd, iptr, d);
450  break;
451 
452  case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
453 
454  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
455  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
456  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
457  asme.imul(d, s1, s2);
458  asme.ubfx(d, d); // cut back to int
459  emit_store_dst(jd, iptr, d);
460  break;
461 
462  case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
463  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
464  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
465  asme.iconst(REG_ITMP2, iptr->sx.val.i);
466  asme.imul(d, s1, REG_ITMP2);
467  asme.ubfx(d, d); // cut back to int
468  emit_store_dst(jd, iptr, d);
469  break;
470 
471  case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
472 
473  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
474  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
475  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
476  asme.lmul(d, s1, s2);
477  emit_store_dst(jd, iptr, d);
478  break;
479 
480  case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
481  /* sx.val.l = constant */
482 
483  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
484  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
485  asme.lconst(REG_ITMP2, iptr->sx.val.l);
486  asme.lmul(d, s1, REG_ITMP2);
487  emit_store_dst(jd, iptr, d);
488  break;
489 
490  case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
491  /* sx.val.i = constant */
492 
493  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
494  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
495  asme.ilsl_imm(d, s1, iptr->sx.val.i);
496  asme.ubfx(d, d); // cut back to int
497  emit_store_dst(jd, iptr, d);
498  break;
499 
500  case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
501  /* sx.val.i = constant */
502 
503  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
504  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
505  asme.llsl_imm(d, s1, iptr->sx.val.i);
506  emit_store_dst(jd, iptr, d);
507  break;
508 
509  case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
510 
511  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
512  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
513  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
514  emit_arithmetic_check(cd, iptr, s2);
515 
516  asme.idiv(REG_ITMP3, s1, s2);
517  asme.imsub(d, REG_ITMP3, s2, s1);
518 
519  emit_store_dst(jd, iptr, d);
520  break;
521 
522  case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
523 
524  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
525  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
526  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
527  emit_arithmetic_check(cd, iptr, s2);
528 
529  asme.ldiv(REG_ITMP3, s1, s2);
530  asme.lmsub(d, REG_ITMP3, s2, s1);
531 
532  emit_store_dst(jd, iptr, d);
533  break;
534 
535  case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
536 
537  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
538  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
539  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
540  emit_arithmetic_check(cd, iptr, s2);
541 
542  asme.idiv(d, s1, s2);
543 
544  emit_store_dst(jd, iptr, d);
545  break;
546 
547  case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
548 
549  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
550  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
551  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
552  emit_arithmetic_check(cd, iptr, s2);
553 
554  asme.ldiv(d, s1, s2);
555 
556  emit_store_dst(jd, iptr, d);
557  break;
558 
559  // TODO: implement this using shift operators
560  case ICMD_IDIVPOW2: /* ..., value ==> ..., value / (2 ^ constant) */
561  /* sx.val.i = constant */
562  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
563  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
564 
565  asme.iconst(REG_ITMP3, pow(2, iptr->sx.val.i));
566  asme.idiv(d, s1, REG_ITMP3);
567 
568  emit_store_dst(jd, iptr, d);
569  break;
570 
571  case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
572  /* sx.val.i = constant [ (2 ^ x) - 1 ] */
573  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
574  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
575 
576  // Use absolute value
577  asme.icmp_imm(s1, 0);
578  asme.icsneg(d, s1, s1, COND_PL);
579 
580  asme.iconst(REG_ITMP3, iptr->sx.val.i);
581  asme.iand(d, d, REG_ITMP3);
582 
583  // Negate the result again if the value was negative
584  asme.icsneg(d, d, d, COND_PL);
585 
586  emit_store_dst(jd, iptr, d);
587  break;
588 
589  case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
590 
591  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
592  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
593  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
594 
595  asme.iconst(REG_ITMP3, 0x1f);
596  asme.iand(REG_ITMP3, s2, REG_ITMP3);
597  asme.ilsl(d, s1, REG_ITMP3);
598  asme.ubfx(d, d); // cut back to int
599 
600  emit_store_dst(jd, iptr, d);
601  break;
602 
603  case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
604 
605  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
606  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
607  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
608 
609  asme.lconst(REG_ITMP3, 0x3f);
610  asme.land(REG_ITMP3, s2, REG_ITMP3);
611  asme.llsl(d, s1, REG_ITMP3);
612 
613  emit_store_dst(jd, iptr, d);
614  break;
615 
616  case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
617  /* sx.val.i = constant */
618 
619  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
620  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
621 
622  asme.ilsl_imm(d, s1, iptr->sx.val.i & 0x1f); // shift amout is between 0 and 31 incl
623  asme.ubfx(d, d); // cut back to int
624 
625  emit_store_dst(jd, iptr, d);
626  break;
627 
628  case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
629  /* sx.val.i = constant */
630 
631  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
632  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
633 
634  asme.llsl_imm(d, s1, iptr->sx.val.i & 0x3f);
635 
636  emit_store_dst(jd, iptr, d);
637  break;
638 
639  case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
640 
641  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
642  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
643  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
644 
645  asme.iconst(REG_ITMP3, 0x1f);
646  asme.iand(REG_ITMP3, s2, REG_ITMP3);
647  asme.iasr(d, s1, REG_ITMP3);
648 
649  emit_store_dst(jd, iptr, d);
650  break;
651 
652  case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
653 
654  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
655  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
656  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
657 
658  asme.lconst(REG_ITMP3, 0x3f);
659  asme.land(REG_ITMP3, s2, REG_ITMP3);
660  asme.lasr(d, s1, REG_ITMP3);
661 
662  emit_store_dst(jd, iptr, d);
663  break;
664 
665  case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
666  /* sx.val.i = constant */
667 
668  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
669  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
670 
671  asme.iasr_imm(d, s1, iptr->sx.val.i & 0x1f);
672 
673  emit_store_dst(jd, iptr, d);
674  break;
675 
676  case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
677  /* sx.val.i = constant */
678 
679  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
680  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
681 
682  asme.lasr_imm(d, s1, iptr->sx.val.i & 0x3f); // TODO: is the constant really in sx.val.i?
683 
684  emit_store_dst(jd, iptr, d);
685  break;
686 
687  case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
688 
689  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
690  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
691  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
692 
693  asme.iconst(REG_ITMP3, 0x1f);
694  asme.iand(REG_ITMP3, s2, REG_ITMP3);
695  asme.ilsr(d, s1, REG_ITMP3);
696 
697  emit_store_dst(jd, iptr, d);
698  break;
699 
700  case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
701 
702  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
703  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
704  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
705 
706  asme.lconst(REG_ITMP3, 0x3f);
707  asme.land(REG_ITMP3, s2, REG_ITMP3);
708  asme.llsr(d, s1, REG_ITMP3);
709 
710  emit_store_dst(jd, iptr, d);
711  break;
712 
713  case ICMD_IUSHRCONST: /* ..., value ==> ..., value >> constant */
714  /* sx.val.i = constant */
715 
716  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
717  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
718 
719  asme.ilsr_imm(d, s1, iptr->sx.val.i & 0x1f);
720 
721  emit_store_dst(jd, iptr, d);
722  break;
723 
724  case ICMD_LUSHRCONST: /* ..., value ==> ..., value >> constant */
725  /* sx.val.i = constant */
726 
727  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
728  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
729 
730  asme.llsr_imm(d, s1, iptr->sx.val.i & 0x3f);
731 
732  emit_store_dst(jd, iptr, d);
733  break;
734 
735  case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
736 
737  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
738  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
739  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
740 
741  asme.iand(d, s1, s2);
742 
743  emit_store_dst(jd, iptr, d);
744  break;
745 
746  case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
747 
748  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
749  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
750  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
751 
752  asme.land(d, s1, s2);
753 
754  emit_store_dst(jd, iptr, d);
755  break;
756 
757  // TODO: implement this using the immediate variant
758  case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
759  /* sx.val.i = constant */
760 
761  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
762  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
763 
764  asme.iconst(REG_ITMP3, iptr->sx.val.i);
765  asme.iand(d, s1, REG_ITMP3);
766 
767  emit_store_dst(jd, iptr, d);
768  break;
769 
770  // TODO: implement this using the immediate variant
771  case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
772  /* sx.val.l = constant */
773 
774  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
775  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
776 
777  asme.lconst(REG_ITMP3, iptr->sx.val.l);
778  asme.land(d, s1, REG_ITMP3);
779 
780  emit_store_dst(jd, iptr, d);
781  break;
782 
783  case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
784 
785  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
786  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
787  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
788 
789  asme.ior(d, s1, s2);
790 
791  emit_store_dst(jd, iptr, d);
792  break;
793 
794  case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
795 
796  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
797  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
798  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
799 
800  asme.lor(d, s1, s2);
801 
802  emit_store_dst(jd, iptr, d);
803  break;
804 
805  // TODO: implement this using the immediate variant
806  case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
807  /* sx.val.i = constant */
808 
809  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
810  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
811 
812  asme.iconst(REG_ITMP2, iptr->sx.val.i);
813  asme.ior(d, s1, REG_ITMP2);
814 
815  emit_store_dst(jd, iptr, d);
816  break;
817 
818  // TODO: implement this using the immediate variant
819  case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
820  /* sx.val.l = constant */
821 
822  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
823  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
824 
825  asme.lconst(REG_ITMP2, iptr->sx.val.l);
826  asme.lor(d, s1, REG_ITMP2);
827 
828  emit_store_dst(jd, iptr, d);
829  break;
830 
831  case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
832 
833  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
834  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
835  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
836 
837  asme.ixor(d, s1, s2);
838 
839  emit_store_dst(jd, iptr, d);
840  break;
841 
842  case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
843 
844  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
845  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
846  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
847 
848  asme.lxor(d, s1, s2);
849 
850  emit_store_dst(jd, iptr, d);
851  break;
852 
853  // TODO: implement this using the immediate variant
854  case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
855  /* sx.val.i = constant */
856 
857  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
858  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
859 
860  asme.iconst(REG_ITMP2, iptr->sx.val.i);
861  asme.ixor(d, s1, REG_ITMP2);
862 
863  emit_store_dst(jd, iptr, d);
864  break;
865 
866  // TODO: implement this using the immediate variant
867  case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
868  /* sx.val.l = constant */
869 
870  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
871  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
872 
873  asme.lconst(REG_ITMP2, iptr->sx.val.l);
874  asme.lxor(d, s1, REG_ITMP2);
875 
876  emit_store_dst(jd, iptr, d);
877  break;
878 
879  /* floating operations ************************************************/
880 
881  case ICMD_FNEG: /* ..., value ==> ..., - value */
882 
883  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
884  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
885  asme.fneg(d, s1);
886  emit_store_dst(jd, iptr, d);
887  break;
888 
889  case ICMD_DNEG: /* ..., value ==> ..., - value */
890 
891  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
892  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
893  asme.dneg(d, s1);
894  emit_store_dst(jd, iptr, d);
895  break;
896 
897  case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
898 
899  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
900  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
901  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
902  asme.fadd(d, s1, s2);
903  emit_store_dst(jd, iptr, d);
904  break;
905 
906  case ICMD_DADD: /* ..., val1, val2 ==> ..., val1 + val2 */
907 
908  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
909  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
910  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
911  asme.dadd(d, s1, s2);
912  emit_store_dst(jd, iptr, d);
913  break;
914 
915  case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
916 
917  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
918  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
919  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
920  asme.fsub(d, s1, s2);
921  emit_store_dst(jd, iptr, d);
922  break;
923 
924  case ICMD_DSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
925 
926  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
927  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
928  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
929  asme.dsub(d, s1, s2);
930  emit_store_dst(jd, iptr, d);
931  break;
932 
933  case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
934 
935  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
936  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
937  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
938  if (d == s1 || d == s2) {
939  asme.fmul(REG_FTMP3, s1, s2);
940  asme.fmov(d, REG_FTMP3);
941  } else {
942  asme.fmul(d, s1, s2);
943  }
944  emit_store_dst(jd, iptr, d);
945  break;
946 
947  case ICMD_DMUL: /* ..., val1, val2 ==> ..., val1 *** val2 */
948 
949  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
950  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
951  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
952  if (d == s1 || d == s2) {
953  asme.dmul(REG_FTMP3, s1, s2);
954  asme.dmov(d, REG_FTMP3);
955  } else {
956  asme.dmul(d, s1, s2);
957  }
958  emit_store_dst(jd, iptr, d);
959  break;
960 
961  case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
962 
963  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
964  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
965  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
966  asme.fdiv(d, s1, s2);
967  emit_store_dst(jd, iptr, d);
968  break;
969 
970  case ICMD_DDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
971 
972  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
973  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
974  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
975  asme.ddiv(d, s1, s2);
976  emit_store_dst(jd, iptr, d);
977  break;
978 
979  case ICMD_I2F: /* ..., value ==> ..., (float) value */
980  case ICMD_L2F:
981  case ICMD_I2D: /* ..., value ==> ..., (double) value */
982  case ICMD_L2D:
983  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
984  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
985 
986  switch (iptr->opc) {
987  case ICMD_I2F: asme.i2f(d, s1); break;
988  case ICMD_L2F: asme.l2f(d, s1); break;
989  case ICMD_I2D: asme.i2d(d, s1); break;
990  case ICMD_L2D: asme.l2d(d, s1); break;
991  }
992 
993  emit_store_dst(jd, iptr, d);
994  break;
995 
996  case ICMD_F2I: /* ..., value ==> ..., (int) value */
997  case ICMD_D2I:
998  case ICMD_F2L: /* ..., value ==> ..., (long) value */
999  case ICMD_D2L:
1000  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1001  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
1002 
1003  // If the fp value is NaN (unordered) set the result to 0
1004  asme.iconst(d, 0);
1005 
1006  // Use the correct comparison instruction
1007  if (iptr->opc == ICMD_F2I || iptr->opc == ICMD_F2L)
1008  asme.fcmp(s1);
1009  else
1010  asme.dcmp(s1, s1);
1011 
1012  // Jump over the conversion if unordered
1013  asme.b_vs(2);
1014 
1015  // Rounding towards zero (see Java spec)
1016  switch (iptr->opc) {
1017  case ICMD_F2I: asme.f2i(d, s1); break;
1018  case ICMD_D2I: asme.d2i(d, s1); break;
1019  case ICMD_F2L: asme.f2l(d, s1); break;
1020  case ICMD_D2L: asme.d2l(d, s1); break;
1021  }
1022 
1023  emit_store_dst(jd, iptr, d);
1024  break;
1025 
1026  case ICMD_F2D: /* ..., value ==> ..., (double) value */
1027 
1028  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1029  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
1030  asme.f2d(d, s1);
1031  emit_store_dst(jd, iptr, d);
1032  break;
1033 
1034  case ICMD_D2F: /* ..., value ==> ..., (float) value */
1035 
1036  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1037  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
1038  asme.d2f(d, s1);
1039  emit_store_dst(jd, iptr, d);
1040  break;
1041 
1042  case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1043  case ICMD_DCMPL:
1044  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1045  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1046  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
1047 
1048  if (iptr->opc == ICMD_FCMPL)
1049  asme.fcmp(s1, s2);
1050  else
1051  asme.dcmp(s1, s2);
1052 
1053  asme.iconst(d, 0);
1054  asme.iconst(REG_ITMP1, 1);
1055  asme.iconst(REG_ITMP2, -1);
1056 
1057  /* set to -1 if less than or unordered (NaN) */
1058  /* set to 1 if greater than */
1060 
1061  /* set to 0 if equal or result of previous csel */
1062  asme.icsel(d, d, REG_ITMP1, COND_EQ);
1063 
1064  emit_store_dst(jd, iptr, d);
1065  break;
1066 
1067  case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1068  case ICMD_DCMPG:
1069  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1070  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1071  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
1072 
1073  if (iptr->opc == ICMD_FCMPG)
1074  asme.fcmp(s1, s2);
1075  else
1076  asme.dcmp(s1, s2);
1077 
1078  asme.iconst(d, 0);
1079  asme.iconst(REG_ITMP1, 1);
1080  asme.iconst(REG_ITMP2, -1);
1081 
1082  /* set to 1 if greater than or unordered (NaN) */
1083  /* set to -1 if less than */
1085 
1086  /* set to 0 if equal or result of previous csel */
1087  asme.icsel(d, d, REG_ITMP1, COND_EQ);
1088 
1089  emit_store_dst(jd, iptr, d);
1090  break;
1091 
1092  /* memory operations **************************************************/
1093 
1094  case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1095 
1096  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1097  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1098  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1099  /* implicit null-pointer check */
1100  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1101 
1102  asme.ladd(REG_ITMP1, s1, s2);
1103  asme.ldrsb32(d, REG_ITMP1, OFFSET (java_bytearray_t, data[0]));
1104 
1105  emit_store_dst(jd, iptr, d);
1106  break;
1107 
1108  case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1109 
1110  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1111  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1112  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1113  /* implicit null-pointer check */
1114  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1115 
1116  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); /* REG_ITMP1 = s1 + (2 * s2) */
1117  asme.ldrh(d, REG_ITMP1, OFFSET(java_chararray_t, data[0]));
1118 
1119  emit_store_dst(jd, iptr, d);
1120  break;
1121 
1122  case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1123 
1124  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1125  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1126  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1127  /* implicit null-pointer check */
1128  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1129 
1130  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); /* REG_ITMP1 = s1 + (2 * s2) */
1131  asme.ldrsh32(d, REG_ITMP1, OFFSET(java_shortarray_t, data[0]));
1132 
1133  emit_store_dst(jd, iptr, d);
1134  break;
1135 
1136  case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1137 
1138  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1139  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1140  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1141  /* implicit null-pointer check */
1142  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1143 
1144  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1145  asme.ild(d, REG_ITMP1, OFFSET(java_intarray_t, data[0]));
1146 
1147  emit_store_dst(jd, iptr, d);
1148  break;
1149 
1150  case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1151 
1152  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1153  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1154  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1155  /* implicit null-pointer check */
1156  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1157 
1158  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1159  asme.lld(d, REG_ITMP1, OFFSET(java_longarray_t, data[0]));
1160 
1161  emit_store_dst(jd, iptr, d);
1162  break;
1163 
1164  case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1165 
1166  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1167  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1168  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
1169  /* implicit null-pointer check */
1170  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1171 
1172  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1173  asme.fld(d, REG_ITMP1, OFFSET(java_floatarray_t, data[0]));
1174 
1175  emit_store_dst(jd, iptr, d);
1176  break;
1177 
1178  case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1179 
1180  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1181  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1182  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
1183  /* implicit null-pointer check */
1184  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1185 
1186  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1187  asme.dld(d, REG_ITMP1, OFFSET(java_doublearray_t, data[0]));
1188 
1189  emit_store_dst(jd, iptr, d);
1190  break;
1191 
1192  case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1193 
1194  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1195  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1196  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1197  /* implicit null-pointer check */
1198  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1199 
1200  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1201  asme.ald(d, REG_ITMP1, OFFSET(java_objectarray_t, data[0]));
1202 
1203  emit_store_dst(jd, iptr, d);
1204  break;
1205 
1206  case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1207 
1208  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1209  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1210  /* implicit null-pointer check */
1211  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1212  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1213 
1214  asme.ladd(REG_ITMP1, s1, s2);
1215  asme.strb(s3, REG_ITMP1, OFFSET(java_bytearray_t, data[0]));
1216 
1217  break;
1218 
1219  case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1220 
1221  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1222  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1223  /* implicit null-pointer check */
1224  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1225  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1226 
1227  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); // REG_ITMP1 = s1 + (2 * s2)
1228  asme.strh(s3, REG_ITMP1, OFFSET(java_chararray_t, data[0]));
1229 
1230  break;
1231 
1232  case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1233 
1234  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1235  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1236  /* implicit null-pointer check */
1237  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1238  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1239 
1240  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); // REG_ITMP1 = s1 + (2 * s2)
1241  asme.strh(s3, REG_ITMP1, OFFSET(java_shortarray_t, data[0]));
1242 
1243  break;
1244 
1245  case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1246 
1247  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1248  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1249  /* implicit null-pointer check */
1250  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1251  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1252 
1253  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1254  asme.ist(s3, REG_ITMP1, OFFSET(java_intarray_t, data[0]));
1255 
1256  break;
1257 
1258  case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1259 
1260  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1261  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1262  /* implicit null-pointer check */
1263  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1264  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1265 
1266  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1267  asme.lst(s3, REG_ITMP1, OFFSET(java_longarray_t, data[0]));
1268 
1269  break;
1270 
1271  case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1272 
1273  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1274  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1275  /* implicit null-pointer check */
1276  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1277  s3 = emit_load_s3(jd, iptr, REG_FTMP3);
1278 
1279  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1280  asme.fst(s3, REG_ITMP1, OFFSET(java_floatarray_t, data[0]));
1281 
1282  break;
1283 
1284  case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1285 
1286  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1287  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1288  /* implicit null-pointer check */
1289  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1290  s3 = emit_load_s3(jd, iptr, REG_FTMP3);
1291 
1292  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1293  asme.dst(s3, REG_ITMP1, OFFSET(java_doublearray_t, data[0]));
1294 
1295  break;
1296 
1297  case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1298 
1299  s1 = emit_load_s1(jd, iptr, REG_A0);
1300  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1301  /* implicit null-pointer check */
1302  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1303  s3 = emit_load_s3(jd, iptr, REG_A1);
1304 
1305  asme.mov(REG_A0, s1);
1306  asme.mov(REG_A1, s3);
1307 
1309  asme.ald(REG_PV, REG_PV, disp);
1310  asme.blr(REG_PV);
1311 
1312  disp = (s4) (cd->mcodeptr - cd->mcodebase);
1313  asme.lda(REG_PV, REG_RA, -disp);
1314  emit_arraystore_check(cd, iptr);
1315 
1316  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1317  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1318  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1319  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1320  asme.ast(s3, REG_ITMP1, OFFSET(java_objectarray_t, data[0]));
1321  break;
1322 
1323  case ICMD_GETFIELD: /* ... ==> ..., value */
1324 
1325  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1326 
1327  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1328  uf = iptr->sx.s23.s3.uf;
1329  fieldtype = uf->fieldref->parseddesc.fd->type;
1330  disp = 0;
1331 
1333  }
1334  else {
1335  fi = iptr->sx.s23.s3.fmiref->p.field;
1336  fieldtype = fi->type;
1337  disp = fi->offset;
1338  }
1339 
1340  if (IS_INT_LNG_TYPE(fieldtype))
1341  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1342  else
1343  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1344 
1345  /* implicit null-pointer check */
1346  switch (fieldtype) {
1347  case TYPE_INT:
1348  asme.ild(d, s1, disp);
1349  break;
1350  case TYPE_LNG:
1351  asme.lld(d, s1, disp);
1352  break;
1353  case TYPE_ADR:
1354  asme.ald(d, s1, disp);
1355  break;
1356  case TYPE_FLT:
1357  asme.fld(d, s1, disp);
1358  break;
1359  case TYPE_DBL:
1360  asme.dld(d, s1, disp);
1361  break;
1362  }
1363  asme.nop();
1364  emit_store_dst(jd, iptr, d);
1365  break;
1366 
1367  case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
1368 
1369  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1370 
1371  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1372  uf = iptr->sx.s23.s3.uf;
1373  fieldtype = uf->fieldref->parseddesc.fd->type;
1374  disp = 0;
1375  }
1376  else {
1377  uf = NULL;
1378  fi = iptr->sx.s23.s3.fmiref->p.field;
1379  fieldtype = fi->type;
1380  disp = fi->offset;
1381  }
1382 
1383  if (IS_INT_LNG_TYPE(fieldtype))
1384  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1385  else
1386  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1387 
1388  if (INSTRUCTION_IS_UNRESOLVED(iptr))
1390 
1391  /* implicit null-pointer check */
1392  switch (fieldtype) {
1393  case TYPE_INT:
1394  asme.ist(s2, s1, disp);
1395  break;
1396  case TYPE_LNG:
1397  asme.lst(s2, s1, disp);
1398  break;
1399  case TYPE_ADR:
1400  asme.ast(s2, s1, disp);
1401  break;
1402  case TYPE_FLT:
1403  asme.fst(s2, s1, disp);
1404  break;
1405  case TYPE_DBL:
1406  asme.dst(s2, s1, disp);
1407  break;
1408  }
1409  asme.nop();
1410  break;
1411 
1412  /* branch operations **************************************************/
1413 
1414  case ICMD_IF_LEQ: /* ..., value ==> ... */
1415  case ICMD_IF_LNE:
1416  case ICMD_IF_LLT:
1417  case ICMD_IF_LGE:
1418  case ICMD_IF_LGT:
1419  case ICMD_IF_LLE:
1420 
1421  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1422 
1423  if (iptr->sx.val.l >= 0 && iptr->sx.val.l <= 0xfff) {
1424  asme.lcmp_imm(s1, iptr->sx.val.l);
1425  } else if ((-iptr->sx.val.l) >= 0 && (-iptr->sx.val.l) <= 0xfff) {
1426  asme.lcmn_imm(s1, -iptr->sx.val.l);
1427  } else {
1428  asme.lconst(REG_ITMP2, iptr->sx.val.l);
1429  asme.lcmp(s1, REG_ITMP2);
1430  }
1431 
1432  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LEQ, BRANCH_OPT_NONE);
1433 
1434  break;
1435 
1436  case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1437  case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1438  case ICMD_IF_LCMPLT:
1439  case ICMD_IF_LCMPGT:
1440  case ICMD_IF_LCMPLE:
1441  case ICMD_IF_LCMPGE:
1442 
1443  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1444  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1445 
1446  asme.lcmp(s1, s2);
1447  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LCMPEQ, BRANCH_OPT_NONE);
1448 
1449  break;
1450 
1451  case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1452 
1454  break;
1455 
1456  case ICMD_BUILTIN: /* ..., arg1, arg2, arg3 ==> ... */
1457  bte = iptr->sx.s23.s3.bte;
1458  if (bte->stub == NULL)
1459  disp = dseg_add_functionptr(cd, bte->fp);
1460  else
1461  disp = dseg_add_functionptr(cd, bte->stub);
1462 
1463  asme.ald(REG_PV, REG_PV, disp); /* Pointer to built-in-function */
1464 
1465  /* generate the actual call */
1466  asme.blr(REG_PV);
1467  break;
1468 
1469  case ICMD_INVOKESPECIAL:
1470  emit_nullpointer_check(cd, iptr, REG_A0);
1471  /* fall-through */
1472 
1473  case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
1474  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1475  um = iptr->sx.s23.s3.um;
1476  disp = dseg_add_unique_address(cd, um);
1477 
1479  um, disp);
1480  }
1481  else {
1482  lm = iptr->sx.s23.s3.fmiref->p.method;
1483  disp = dseg_add_address(cd, lm->stubroutine);
1484  }
1485 
1486  asme.ald(REG_PV, REG_PV, disp); /* method pointer in r27 */
1487 
1488  /* generate the actual call */
1489  asme.blr(REG_PV);
1490  break;
1491 
1492  case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
1493  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1494  um = iptr->sx.s23.s3.um;
1496 
1497  s1 = 0;
1498  }
1499  else {
1500  lm = iptr->sx.s23.s3.fmiref->p.method;
1501  s1 = OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex;
1502  }
1503 
1504  /* implicit null-pointer check */
1505  asme.ald(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
1506  asme.ald(REG_PV, REG_METHODPTR, s1);
1507 
1508  /* generate the actual call */
1509  asme.blr(REG_PV);
1510 
1511  break;
1512 
1513  case ICMD_INVOKEINTERFACE:
1514  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1515  um = iptr->sx.s23.s3.um;
1517 
1518  s1 = 0;
1519  s2 = 0;
1520  }
1521  else {
1522  lm = iptr->sx.s23.s3.fmiref->p.method;
1523  s1 = OFFSET(vftbl_t, interfacetable[0]) -
1524  sizeof(methodptr*) * lm->clazz->index;
1525 
1526  s2 = sizeof(methodptr) * (lm - lm->clazz->methods);
1527  }
1528 
1529  /* implicit null-pointer check */
1530  asme.ald(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
1531 
1532  /* on aarch64 we only have negative offsets in the range of -255 to 255 so we need a mov */
1533  assert(abs(s1) <= 0xffff);
1534  assert(abs(s2) <= 0xffff);
1535  asme.lconst(REG_ITMP1, s1);
1536  asme.lconst(REG_ITMP3, s2);
1537 
1538  emit_ldr_reg(cd, REG_METHODPTR, REG_METHODPTR, REG_ITMP1); // TODO: move to emitter
1539  emit_ldr_reg(cd, REG_PV, REG_METHODPTR, REG_ITMP3); // TODO: move to emitter
1540 
1541  /* generate the actual call */
1542  asme.blr(REG_PV);
1543  break;
1544 
1545  case ICMD_TABLESWITCH: /* ..., index ==> ... */
1546 
1547  s4 i, l;
1548  branch_target_t *table;
1549 
1550  table = iptr->dst.table;
1551 
1552  l = iptr->sx.s23.s2.tablelow;
1553  i = iptr->sx.s23.s3.tablehigh;
1554 
1555  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1556  if (l == 0) {
1557  asme.imov(REG_ITMP1, s1); // TODO: check that this works
1558  } else if (abs(l) <= 32768) {
1559  if (l < 0) {
1560  asme.iadd_imm(REG_ITMP1, s1, -l);
1561  } else {
1562  asme.isub_imm(REG_ITMP1, s1, l);
1563  }
1564  } else {
1565  asme.iconst(REG_ITMP2, l);
1566  asme.isub(REG_ITMP1, s1, REG_ITMP2);
1567  }
1568 
1569  /* number of targets */
1570  i = i - l + 1;
1571 
1572  /* range check */
1573  emit_icmp_imm(cd, REG_ITMP1, i-1);
1574  emit_bcc(cd, table[0].block, BRANCH_UGT, BRANCH_OPT_NONE);
1575 
1576  /* build jump table top down and use address of lowest entry */
1577 
1578  table += i;
1579 
1580  while (--i >= 0) {
1581  dseg_add_target(cd, table->block);
1582  --table;
1583  }
1584 
1585  /* length of dataseg after last dseg_add_target is used by load */
1586  asme.sxtw(REG_ITMP1, REG_ITMP1);
1588  asme.ald(REG_ITMP2, REG_ITMP2, -(cd->dseglen));
1589  asme.br(REG_ITMP2);
1590  ALIGNCODENOP;
1591  break;
1592 
1593  case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
1594 
1595  if (!(iptr->flags.bits & INS_FLAG_ARRAY)) {
1596  // object type cast-check
1597 
1598  classinfo *super;
1599  s4 superindex;
1600 
1601  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1602  super = NULL;
1603  superindex = 0;
1604  }
1605  else {
1606  super = iptr->sx.s23.s3.c.cls;
1607  superindex = super->index;
1608  }
1609 
1610  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1611 
1612  // if class is not resolved, check which code to call
1613 
1614  if (super == NULL) {
1615  asme.lcmp_imm(s1, 0);
1616  emit_label_beq(cd, BRANCH_LABEL_1);
1617 
1618  disp = dseg_add_unique_s4(cd, 0); /* super->flags */
1619 
1621  iptr->sx.s23.s3.c.ref, disp);
1622 
1623  asme.ild(REG_ITMP2, REG_PV, disp);
1624 
1625  disp = dseg_add_s4(cd, ACC_INTERFACE);
1626  asme.ild(REG_ITMP3, REG_PV, disp);
1627 
1628  asme.ltst(REG_ITMP2, REG_ITMP3);
1629  emit_label_beq(cd, BRANCH_LABEL_2);
1630  }
1631 
1632  // interface checkcast code
1633 
1634  if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
1635  if (super != NULL) {
1636  asme.lcmp_imm(s1, 0);
1637  emit_label_beq(cd, BRANCH_LABEL_3);
1638  }
1639 
1640  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1641 
1642  if (super == NULL) {
1644  iptr->sx.s23.s3.c.ref, 0);
1645  }
1646 
1647  asme.ild(REG_ITMP3, REG_ITMP2,
1648  OFFSET(vftbl_t, interfacetablelength));
1649 
1650  assert(abs(superindex) <= 0xfff);
1651  asme.icmp_imm(REG_ITMP3, superindex);
1652  emit_classcast_check(cd, iptr, BRANCH_LE, REG_ITMP3, s1);
1653 
1654  s4 offset = (s4) (OFFSET(vftbl_t, interfacetable[0]) -
1655  superindex * sizeof(methodptr*));
1656 
1657  assert(abs(offset) <= 0xffff);
1658  asme.lconst(REG_ITMP3, offset);
1659  emit_ldr_reg(cd, REG_ITMP3, REG_ITMP2, REG_ITMP3); // TODO: mov this to emitter
1660  asme.lcmp_imm(REG_ITMP3, 0);
1661  emit_classcast_check(cd, iptr, BRANCH_EQ, REG_ITMP3, s1);
1662 
1663  if (super == NULL)
1665  else
1667  }
1668 
1669  // class checkcast code
1670 
1671  if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
1672  if (super == NULL) {
1674 
1675  disp = dseg_add_unique_address(cd, NULL);
1676 
1678  iptr->sx.s23.s3.c.ref, disp);
1679  }
1680  else {
1681  disp = dseg_add_address(cd, super->vftbl);
1682 
1683  asme.lcmp_imm(s1, 0);
1684  emit_label_beq(cd, BRANCH_LABEL_5);
1685  }
1686 
1687  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1688  asme.ald(REG_ITMP3, REG_PV, disp);
1689 
1690  if (super == NULL || super->vftbl->subtype_depth >= DISPLAY_SIZE) {
1691  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1693  asme.ald(REG_ITMP1, REG_ITMP1, 0);
1694 
1695  asme.lcmp(REG_ITMP1, REG_ITMP3);
1696  emit_label_beq(cd, BRANCH_LABEL_6); // good
1697 
1698  if (super == NULL) {
1699  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1700  asme.icmp_imm(REG_ITMP1, OFFSET(vftbl_t, subtype_display[DISPLAY_SIZE]));
1701  emit_label_bne(cd, BRANCH_LABEL_10); // throw
1702  }
1703 
1704  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_depth));
1705  asme.ild(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, subtype_depth));
1706  asme.icmp(REG_ITMP1, REG_ITMP3);
1707  emit_label_bgt(cd, BRANCH_LABEL_9); // throw
1708 
1709  // reload
1710  asme.ald(REG_ITMP3, REG_PV, disp);
1711  asme.ald(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, subtype_overflow));
1713  asme.ald(REG_ITMP1, REG_ITMP2, -DISPLAY_SIZE*8);
1714 
1715  asme.lcmp(REG_ITMP1, REG_ITMP3);
1716  emit_label_beq(cd, BRANCH_LABEL_7); // good
1717 
1719  if (super == NULL)
1721 
1722  // reload s1, might have been destroyed
1723  emit_load_s1(jd, iptr, REG_ITMP1);
1725 
1728  // reload s1, might have been destroyed
1729  emit_load_s1(jd, iptr, REG_ITMP1);
1730  }
1731  else {
1732  asme.ald(REG_ITMP2, REG_ITMP2, super->vftbl->subtype_offset);
1733 
1734  asme.lcmp(REG_ITMP2, REG_ITMP3);
1735  emit_classcast_check(cd, iptr, BRANCH_NE, REG_ITMP3, s1);
1736  }
1737 
1738  if (super != NULL)
1740  }
1741 
1742  if (super == NULL) {
1745  }
1746 
1747  d = codegen_reg_of_dst(jd, iptr, s1);
1748  }
1749  else {
1750  /* array type cast-check */
1751 
1752  s1 = emit_load_s1(jd, iptr, REG_A0);
1753  asme.imov(REG_A0, s1);
1754 
1755  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1756  disp = dseg_add_unique_address(cd, NULL);
1757 
1760  iptr->sx.s23.s3.c.ref,
1761  disp);
1762  }
1763  else
1764  disp = dseg_add_address(cd, iptr->sx.s23.s3.c.cls);
1765 
1766  asme.ald(REG_A1, REG_PV, disp);
1768  asme.ald(REG_PV, REG_PV, disp);
1769  asme.blr(REG_PV);
1770  disp = (s4) (cd->mcodeptr - cd->mcodebase);
1771  asme.lda(REG_PV, REG_RA, -disp);
1772 
1773  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1774  asme.ltst(REG_RESULT, REG_RESULT);
1775  emit_classcast_check(cd, iptr, BRANCH_EQ, REG_RESULT, s1);
1776 
1777  d = codegen_reg_of_dst(jd, iptr, s1);
1778  }
1779 
1780  asme.mov(d, s1);
1781  emit_store_dst(jd, iptr, d);
1782  break;
1783 
1784  case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
1785 
1786  {
1787  classinfo *super;
1788  vftbl_t *supervftbl;
1789  s4 superindex;
1790 
1791  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1792  super = NULL;
1793  superindex = 0;
1794  supervftbl = NULL;
1795 
1796  } else {
1797  super = iptr->sx.s23.s3.c.cls;
1798  superindex = super->index;
1799  supervftbl = super->vftbl;
1800  }
1801 
1802  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1803  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1804 
1805  if (s1 == d) {
1806  asme.mov(REG_ITMP1, s1);
1807  s1 = REG_ITMP1;
1808  }
1809 
1810  /* if class is not resolved, check which code to call */
1811 
1812  if (super == NULL) {
1813  asme.clr(d);
1814  asme.lcmp_imm(s1, 0);
1815  emit_label_beq(cd, BRANCH_LABEL_1);
1816 
1817  disp = dseg_add_unique_s4(cd, 0); /* super->flags */
1818 
1820  iptr->sx.s23.s3.c.ref, disp);
1821 
1822  asme.ild(REG_ITMP3, REG_PV, disp);
1824 
1825  asme.itst(REG_ITMP3, REG_ITMP2);
1826  emit_label_beq(cd, BRANCH_LABEL_2);
1827  }
1828 
1829  /* interface instanceof code */
1830 
1831  if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
1832  if (super == NULL) {
1833  /* If d == REG_ITMP2, then it's destroyed in check
1834  code above. */
1835  if (d == REG_ITMP2)
1836  asme.clr(d);
1837 
1840  iptr->sx.s23.s3.c.ref, 0);
1841  } else {
1842  asme.clr(d);
1843  asme.lcmp_imm(s1, 0);
1844  emit_label_beq(cd, BRANCH_LABEL_3);
1845  }
1846 
1847  asme.ald(REG_ITMP1, s1, OFFSET(java_object_t, vftbl));
1848  asme.ild(REG_ITMP3, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
1849  assert(abs(superindex) <= 0xfff);
1850  asme.icmp_imm(REG_ITMP3, superindex);
1851  asme.b_le(5);
1852 
1853  s4 offset = (s4) (OFFSET(vftbl_t, interfacetable[0]) -
1854  superindex * sizeof(methodptr*));
1855  assert(abs(offset) <= 0xffff);
1856  asme.lconst(REG_ITMP3, offset);
1858 
1859  asme.lcmp_imm(REG_ITMP1, 0);
1860  asme.cset(d, COND_NE); /* if REG_ITMP != 0 then d = 1 */
1861 
1862  if (super == NULL)
1864  else
1866  }
1867 
1868  /* class instanceof code */
1869 
1870  if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
1871  if (super == NULL) {
1873 
1874  disp = dseg_add_unique_address(cd, NULL);
1875 
1877  iptr->sx.s23.s3.c.ref,
1878  disp);
1879  }
1880  else {
1881  disp = dseg_add_address(cd, supervftbl);
1882 
1883  asme.clr(d);
1884  asme.lcmp_imm(s1, 0);
1885  emit_label_beq(cd, BRANCH_LABEL_5);
1886  }
1887 
1888  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1889  asme.ald(REG_ITMP3, REG_PV, disp);
1890 
1891  if (super == NULL || super->vftbl->subtype_depth >= DISPLAY_SIZE) {
1892  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1894  asme.ald(REG_ITMP1, REG_ITMP1, 0);
1895  asme.lcmp(REG_ITMP1, REG_ITMP3);
1896  emit_label_bne(cd, BRANCH_LABEL_8);
1897 
1898  asme.iconst(d, 1);
1899  emit_label_br(cd, BRANCH_LABEL_6); /* true */
1901 
1902  if (super == NULL) {
1903  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1904  asme.icmp_imm(REG_ITMP1, OFFSET(vftbl_t, subtype_display[DISPLAY_SIZE]));
1905  emit_label_bne(cd, BRANCH_LABEL_10); /* false */
1906  }
1907 
1908  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_depth));
1909 
1910  asme.ild(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, subtype_depth));
1911  asme.icmp(REG_ITMP1, REG_ITMP3);
1912  emit_label_bgt(cd, BRANCH_LABEL_9); /* false */
1913 
1914  /* reload */
1915  asme.ald(REG_ITMP3, REG_PV, disp);
1916  asme.ald(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, subtype_overflow));
1918  asme.ald(REG_ITMP1, REG_ITMP2, -DISPLAY_SIZE*8);
1919 
1920  asme.lcmp(REG_ITMP1, REG_ITMP3);
1921  asme.cset(d, COND_EQ);
1922 
1923  if (d == REG_ITMP2)
1926 
1927  if (super == NULL)
1929 
1930  if (d == REG_ITMP2) {
1931  asme.clr(d);
1933  }
1935 
1936  }
1937  else {
1938  asme.ald(REG_ITMP2, REG_ITMP2, super->vftbl->subtype_offset);
1939  asme.lcmp(REG_ITMP2, REG_ITMP3);
1940  asme.cset(d, COND_EQ);
1941  }
1942 
1943  if (super != NULL)
1945  }
1946 
1947  if (super == NULL) {
1950  }
1951 
1952  emit_store_dst(jd, iptr, d);
1953  }
1954  break;
1955 
1956  case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
1957 
1958  /* check for negative sizes and copy sizes to stack if necessary */
1959 
1960  MCODECHECK((iptr->s1.argcount << 1) + 64);
1961 
1962  for (s1 = iptr->s1.argcount; --s1 >= 0; ) {
1963 
1964  var = VAR(iptr->sx.s23.s2.args[s1]);
1965 
1966  /* copy SAVEDVAR sizes to stack */
1967 
1968  /* Already Preallocated? */
1969 
1970  if (!(var->flags & PREALLOC)) {
1971  s2 = emit_load(jd, iptr, var, REG_ITMP1);
1972  asme.lst(s2, REG_SP, s1 * 8);
1973  }
1974  }
1975 
1976  /* a0 = dimension count */
1977 
1978  asme.iconst(REG_A0, iptr->s1.argcount);
1979 
1980  /* is patcher function set? */
1981 
1982  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1983  disp = dseg_add_unique_address(cd, 0);
1984 
1986  iptr->sx.s23.s3.c.ref,
1987  disp);
1988  }
1989  else
1990  disp = dseg_add_address(cd, iptr->sx.s23.s3.c.cls);
1991 
1992  /* a1 = arraydescriptor */
1993 
1994  asme.ald(REG_A1, REG_PV, disp);
1995 
1996  /* a2 = pointer to dimensions = stack pointer */
1997 
1998  asme.mov(REG_A2, REG_SP);
1999 
2001  asme.ald(REG_PV, REG_PV, disp);
2002  asme.blr(REG_PV);
2003  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2004  asme.lda(REG_PV, REG_RA, -disp);
2005 
2006  /* check for exception before result assignment */
2007 
2008  emit_exception_check(cd, iptr);
2009 
2010  d = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2011  asme.imov(d, REG_RESULT);
2012  emit_store_dst(jd, iptr, d);
2013  break;
2014 
2015  default:
2016  os::abort("ICMD (%s, %d) not implemented yet on Aarch64!",
2017  icmd_table[iptr->opc].name, iptr->opc);
2018  }
2019 
2020  return;
2021 }
2022 
2023 
2024 /* codegen_emit_stub_native ****************************************************
2025 
2026  Emits a stub routine which calls a native method.
2027 
2028 *******************************************************************************/
2029 
2030  void codegen_emit_stub_native(jitdata *jd, methoddesc *nmd, functionptr f, int skipparams)
2031  {
	/* NOTE(review): this text is a documentation export of the source file;
	   a few original lines (2117, 2254, 2307) were dropped by the export and
	   are flagged below — confirm them against the repository source. */
2032  methodinfo *m;
2033  codeinfo *code;
2034  codegendata *cd;
2035  methoddesc *md;
2036  int i, j;
2037  int t;
2038  int s1, s2;
2039  int disp;
2040 
2041  /* get required compiler data */
2042 
2043  m = jd->m;
2044  code = jd->code;
2045  cd = jd->cd;
2046  AsmEmitter asme(cd);
2047 
2048  /* initialize variables */
2049 
2050  md = m->parseddesc;
2051 
2052  /* calculate stack frame size */
2053 
2054  cd->stackframesize =
2055  1 + /* return address */
2056  sizeof(stackframeinfo_t) / SIZEOF_VOID_P +
2057  sizeof(localref_table) / SIZEOF_VOID_P +
2058  1 + /* methodinfo for call trace */
2059  md->paramcount +
2060  nmd->memuse;
2061 
2062  /* create method header */
2063  u4 stackoffset = (cd->stackframesize * 8);
	/* Slots are 8 bytes each, so stackoffset % 16 is either 0 or 8; adding
	   it back rounds the frame size up to a 16-byte multiple (AArch64
	   requires SP to stay 16-byte aligned). */
2064  stackoffset += stackoffset % 16;
2065 
2066  (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
2067  (void) dseg_add_unique_s4(cd, stackoffset); /* FrameSize */
2068  (void) dseg_add_unique_s4(cd, 0); /* IsLeaf */
2069  (void) dseg_add_unique_s4(cd, 0); /* IntSave */
2070  (void) dseg_add_unique_s4(cd, 0); /* FltSave */
2071 
2072  /* generate stub code */
2073 
	/* allocate the frame and save the return address in the top slot */
2074  asme.lda(REG_SP, REG_SP, -stackoffset);
2075  asme.lst(REG_RA, REG_SP, stackoffset - SIZEOF_VOID_P);
2076 
2077  #if defined(ENABLE_GC_CACAO)
2078  /* Save callee saved integer registers in stackframeinfo (GC may
2079  need to recover them during a collection). */
2080 
2081  disp = cd->stackframesize * 8 - SIZEOF_VOID_P - sizeof(stackframeinfo_t) +
2082  OFFSET(stackframeinfo_t, intregs);
2083 
2084  for (i = 0; i < INT_SAV_CNT; i++)
2085  asme.lst(abi_registers_integer_saved[i], REG_SP, disp + i * 8);
2086  #endif
2087 
2088  /* save integer and float argument registers */
2089 
	/* spill every register-resident argument to its stack slot so the
	   helper call below cannot clobber it */
2090  for (i = 0; i < md->paramcount; i++) {
2091  if (!md->params[i].inmemory) {
2092  s1 = md->params[i].regoff;
2093 
2094  switch (md->paramtypes[i].type) {
2095  case TYPE_INT:
2096  case TYPE_LNG:
2097  case TYPE_ADR:
2098  asme.lst(s1, REG_SP, i * 8);
2099  break;
2100  case TYPE_FLT:
2101  asme.fst(s1, REG_SP, i * 8);
2102  break;
2103  case TYPE_DBL:
2104  asme.dst(s1, REG_SP, i * 8);
2105  break;
2106  default:
2107  assert(false);
2108  break;
2109  }
2110  }
2111  }
2112 
2113  /* prepare data structures for native function call */
2114 
2115  asme.mov(REG_A0, REG_SP);
2116  asme.mov(REG_A1, REG_PV);
	/* NOTE(review): the line that sets 'disp' for this load (original line
	   2117) is missing from this export — presumably a dseg_add_functionptr
	   for the native stackframe-info setup helper; confirm in the repo. */
2118  asme.lld(REG_PV, REG_PV, disp);
2119  asme.blr(REG_PV);
	/* recompute pv from ra after the call */
2120  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2121  asme.lda(REG_PV, REG_RA, -disp);
2122 
2123  /* remember class argument */
2124 
2125  if (m->flags & ACC_STATIC)
2126  asme.mov(REG_ITMP3, REG_RESULT);
2127 
2128  /* restore integer and float argument registers */
2129 
2130  for (i = 0; i < md->paramcount; i++) {
2131  if (!md->params[i].inmemory) {
2132  s1 = md->params[i].regoff;
2133 
2134  switch (md->paramtypes[i].type) {
2135  case TYPE_INT:
2136  case TYPE_LNG:
2137  case TYPE_ADR:
2138  asme.lld(s1, REG_SP, i * 8);
2139  break;
2140  case TYPE_FLT:
2141  asme.fld(s1, REG_SP, i * 8);
2142  break;
2143  case TYPE_DBL:
2144  asme.dld(s1, REG_SP, i * 8);
2145  break;
2146  default:
2147  assert(false);
2148  break;
2149  }
2150  }
2151  }
2152 
2153  /* copy or spill arguments to new locations */
2154 
	/* walk the Java descriptor (index i) backwards while tracking the
	   native descriptor (index j = i + skipparams); the extra leading
	   native parameters are presumably the JNI env and, for static
	   methods, the class argument — confirm against the callers */
2155  for (i = md->paramcount - 1, j = i + skipparams; i >= 0; i--, j--) {
2156  t = md->paramtypes[i].type;
2157 
2158  if (IS_INT_LNG_TYPE(t)) {
2159  if (!md->params[i].inmemory) {
2160  s1 = md->params[i].regoff;
2161  s2 = nmd->params[j].regoff;
2162 
2163  if (!nmd->params[j].inmemory)
2164  asme.mov(s2, s1);
2165  else
2166  asme.lst(s1, REG_SP, s2);
2167  }
2168  else {
	/* stack-to-stack copy: source offset is relative to the caller
	   frame, hence the stackoffset bias */
2169  s1 = md->params[i].regoff + stackoffset;
2170  s2 = nmd->params[j].regoff;
2171  asme.lld(REG_ITMP1, REG_SP, s1);
2172  asme.lst(REG_ITMP1, REG_SP, s2);
2173  }
2174  }
2175  else {
2176  if (!md->params[i].inmemory) {
2177  s1 = md->params[i].regoff;
2178  s2 = nmd->params[j].regoff;
2179 
2180  if (!nmd->params[j].inmemory)
2181  if (IS_2_WORD_TYPE(t))
2182  asme.dmov(s2, s1);
2183  else
2184  asme.fmov(s2, s1);
2185  else {
2186  if (IS_2_WORD_TYPE(t))
2187  asme.dst(s1, REG_SP, s2);
2188  else
2189  asme.fst(s1, REG_SP, s2);
2190  }
2191  }
2192  else {
2193  s1 = md->params[i].regoff + stackoffset;
2194  s2 = nmd->params[j].regoff;
2195  if (IS_2_WORD_TYPE(t)) {
2196  asme.dld(REG_FTMP1, REG_SP, s1);
2197  asme.dst(REG_FTMP1, REG_SP, s2);
2198  }
2199  else {
2200  asme.fld(REG_FTMP1, REG_SP, s1);
2201  asme.fst(REG_FTMP1, REG_SP, s2);
2202  }
2203  }
2204  }
2205  }
2206 
2207  /* Handle native Java methods. */
2208 
2209  if (m->flags & ACC_NATIVE) {
2210  /* put class into second argument register */
2211 
	/* REG_ITMP3 was loaded with the helper's result above ("remember
	   class argument") */
2212  if (m->flags & ACC_STATIC)
2213  asme.mov(REG_A1, REG_ITMP3);
2214 
2215  /* put env into first argument register */
2216 
2217  disp = dseg_add_address(cd, VM::get_current()->get_jnienv());
2218  asme.lld(REG_A0, REG_PV, disp);
2219  }
2220 
2221  /* Call the native function. */
2222 
2223  disp = dseg_add_functionptr(cd, f);
2224  asme.lld(REG_PV, REG_PV, disp);
2225  asme.blr(REG_PV); /* call native method */
2226  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2227  asme.lda(REG_PV, REG_RA, -disp); /* recompute pv from ra */
2228 
2229  /* save return value */
2230 
	/* park the result in stack slot 0 so it survives the teardown call */
2231  switch (md->returntype.type) {
2232  case TYPE_INT:
2233  case TYPE_LNG:
2234  case TYPE_ADR:
2235  asme.lst(REG_RESULT, REG_SP, 0 * 8);
2236  break;
2237  case TYPE_FLT:
2238  asme.fst(REG_FRESULT, REG_SP, 0 * 8);
2239  break;
2240  case TYPE_DBL:
2241  asme.dst(REG_FRESULT, REG_SP, 0 * 8);
2242  break;
2243  case TYPE_VOID:
2244  break;
2245  default:
2246  assert(false);
2247  break;
2248  }
2249 
2250  /* remove native stackframe info */
2251 
2252  asme.mov(REG_A0, REG_SP);
2253  asme.mov(REG_A1, REG_PV);
	/* NOTE(review): the line that sets 'disp' for this load (original line
	   2254) is missing from this export — presumably a dseg_add_functionptr
	   for the native stackframe-info teardown helper; confirm in the repo. */
2255  asme.lld(REG_PV, REG_PV, disp);
2256  asme.blr(REG_PV);
2257  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2258  asme.lda(REG_PV, REG_RA, -disp);
	/* the teardown helper's result is treated as a pending exception
	   pointer: non-null means an exception must be handled below */
2259  asme.mov(REG_ITMP1_XPTR, REG_RESULT);
2260 
2261  /* restore return value */
2262 
2263  switch (md->returntype.type) {
2264  case TYPE_INT:
2265  case TYPE_LNG:
2266  case TYPE_ADR:
2267  asme.lld(REG_RESULT, REG_SP, 0 * 8);
2268  break;
2269  case TYPE_FLT:
2270  asme.fld(REG_FRESULT, REG_SP, 0 * 8);
2271  break;
2272  case TYPE_DBL:
2273  asme.dld(REG_FRESULT, REG_SP, 0 * 8);
2274  break;
2275  case TYPE_VOID:
2276  break;
2277  default:
2278  assert(false);
2279  break;
2280  }
2281 
2282  #if defined(ENABLE_GC_CACAO)
2283  /* Restore callee saved integer registers from stackframeinfo (GC
2284  might have modified them during a collection). */
	/* deliberately aborts: this GC path is unfinished on AArch64 */
2285  os::abort("NOT IMPLEMENTED YET!");
2286 
2287  disp = cd->stackframesize * 8 - SIZEOF_VOID_P - sizeof(stackframeinfo_t) +
2288  OFFSET(stackframeinfo_t, intregs);
2289 
2290  for (i = 0; i < INT_SAV_CNT; i++)
2291  asme.lld(abi_registers_integer_saved[i], REG_SP, disp + i * 8);
2292  #endif
2293 
2294  asme.lld(REG_RA, REG_SP, stackoffset - 8); /* get RA */
2295  asme.lda(REG_SP, REG_SP, stackoffset);
2296 
2297  /* check for exception */
2298 
2299 
2300  asme.cbnz(REG_ITMP1_XPTR, 2); /* if no exception then return */
2301  asme.ret(); /* return to caller */
2302 
2303  /* handle exception */
2304 
2305  asme.lsub_imm(REG_ITMP2_XPC, REG_RA, 4); /* get exception address */
2306 
	/* NOTE(review): the final statement (original line 2307) is missing
	   from this export — presumably the branch into the shared exception
	   handler stub; confirm in the repo. */
2308  }
2309 
2310 
2311 /*
2312  * These are local overrides for various environment variables in Emacs.
2313  * Please do not remove this and leave it at the end of the file, where
2314  * Emacs will automagically detect them.
2315  * ---------------------------------------------------------------------
2316  * Local variables:
2317  * mode: c++
2318  * indent-tabs-mode: t
2319  * c-basic-offset: 4
2320  * tab-width: 4
2321  * End:
2322  * vim:noexpandtab:sw=4:ts=4:
2323  */
void ladd(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:210
void strh(u1 wt, u1 xn, s4 imm)
Definition: codegen.hpp:127
void codegen_emit_instruction(jitdata *jd, instruction *iptr)
Generates machine code for one ICMD.
Definition: codegen.cpp:218
s4 dseg_add_double(codegendata *cd, double value)
Definition: dseg.cpp:465
#define REG_SP
Definition: md-abi.hpp:55
val_operand_t val
void f2d(u1 dd, u1 sn)
Definition: codegen.hpp:322
#define BUILTIN_FAST_canstore
Definition: builtin.hpp:153
void fdiv(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:303
#define PATCHER_resolve_classref_to_flags
s4 emit_load_s3(jitdata *jd, instruction *iptr, s4 tempreg)
void lmsub(u1 xd, u1 xn, u1 xm, u1 xa)
Definition: codegen.hpp:232
basicblock * block
union varinfo::@19 vv
#define REG_PV
Definition: md-abi.hpp:42
s4 emit_load_s1(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:63
void lcmp_imm(u1 xd, u2 imm)
Definition: codegen.hpp:96
void icmp(u1 wn, u1 wm)
Definition: codegen.hpp:100
void llsl_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:236
#define BRANCH_OPT_NONE
#define PATCHER_invokeinterface
#define REG_A1
Definition: md-abi.hpp:36
void i2d(u1 dt, u1 wn)
Definition: codegen.hpp:314
Definition: jit.hpp:126
#define REG_A0
Definition: md-abi.hpp:35
paramdesc * params
Definition: descriptor.hpp:164
#define BRANCH_LE
void ast(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:122
#define CODE_LSL
Definition: emit-asm.hpp:42
#define emit_ldr_reg(cd, Xt, Xn, Xm)
Definition: emit-asm.hpp:186
#define BRANCH_LABEL_7
Definition: emit-common.hpp:53
void fmov(u1 sd, u1 sn)
Definition: codegen.hpp:290
s4 dseg_add_unique_address(codegendata *cd, void *value)
Definition: dseg.cpp:525
int * savintregs
Definition: reg.hpp:71
void uxth(u1 wd, u1 wn)
Definition: codegen.hpp:254
methodinfo * methods
Definition: class.hpp:113
#define PATCHER_resolve_classref_to_vftbl
#define BUILTIN_multianewarray
Definition: builtin.hpp:201
#define IS_INT_LNG_TYPE(a)
Definition: global.hpp:130
#define BRANCH_NE
void fneg(u1 sd, u1 sn)
Definition: codegen.hpp:293
void land(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:268
void strb(u1 wt, u1 xn, s4 imm)
Definition: codegen.hpp:128
void ior(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:270
void llsl(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:251
#define PATCHER_get_putfield
void ilsl(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:250
#define BRANCH_EQ
s4 dseg_add_address(codegendata *cd, void *value)
Definition: dseg.cpp:542
codeinfo * code
Definition: jit.hpp:128
#define BRANCH_LABEL_5
Definition: emit-common.hpp:51
#define REG_FRESULT
Definition: md-abi.hpp:61
void lxor(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:274
int32_t argcount
Definition: instruction.hpp:64
void emit_bcc(codegendata *cd, basicblock *target, s4 condition, u4 options)
void lcmn_imm(u1 xd, u2 imm)
Definition: codegen.hpp:98
void lconst(u1 xt, s8 value)
Definition: codegen.hpp:151
#define dseg_add_functionptr(cd, value)
Definition: dseg.hpp:39
codegendata * cd
Definition: jit.hpp:129
void codegen_emit_stub_native(jitdata *jd, methoddesc *nmd, functionptr f, int skipparams)
Definition: codegen.cpp:2030
#define BRANCH_LABEL_10
Definition: emit-common.hpp:56
const char * name
Definition: icmd.hpp:393
typedef void(JNICALL *jvmtiEventSingleStep)(jvmtiEnv *jvmti_env
void ladd_shift(u1 xd, u1 xn, u1 xm, u1 shift, u1 amount)
Xd = Xn + shift(Xm, amount);.
Definition: codegen.hpp:213
void lmul(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:228
#define REG_ITMP1_XPTR
Definition: md-abi.hpp:50
#define REG_A2
Definition: md-abi.hpp:37
void icmp_imm(u1 wd, u2 imm)
Definition: codegen.hpp:95
void ixor(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:273
void idiv(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:224
void itst(u1 wn, u1 wm)
Definition: codegen.hpp:261
void emit_arraystore_check(codegendata *cd, instruction *iptr)
Definition: emit.cpp:380
int savintreguse
Definition: reg.hpp:88
patchref_t * patcher_add_patch_ref(jitdata *jd, functionptr patcher, void *ref, s4 disp)
constant_classref * ref
Definition: references.hpp:62
void sxtw(u1 xd, u1 wn)
Definition: codegen.hpp:257
void dseg_add_target(codegendata *cd, basicblock *target)
Definition: dseg.cpp:565
void cbnz(u1 xn, s4 imm)
Definition: codegen.hpp:202
u1 * methodptr
Definition: global.hpp:40
void i2f(u1 st, u1 wn)
Definition: codegen.hpp:312
#define BRANCH_LABEL_6
Definition: emit-common.hpp:52
void llsr_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:239
java_object_t * codegen_finish_native_call(u1 *sp, u1 *pv)
void fld(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:111
void blr(u1 xn)
Definition: codegen.hpp:198
void lst(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:121
void dmov(u1 dd, u1 dn)
Definition: codegen.hpp:291
u1 * stub
Definition: builtin.hpp:64
void dmul(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:301
void ald(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:109
void ist(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:120
void dcmp(u1 xn, u1 xm)
Definition: codegen.hpp:298
void dadd(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:307
const s4 abi_registers_integer_saved[]
Definition: md-abi.cpp:75
void d2l(u1 xd, u1 dn)
Definition: codegen.hpp:320
#define VAR(i)
Definition: jit.hpp:259
Definition: reg.hpp:43
static int code_is_leafmethod(codeinfo *code)
Definition: code.hpp:150
#define BUILTIN_arraycheckcast
Definition: builtin.hpp:148
#define REG_ITMP2_XPC
Definition: md-abi.hpp:51
void iadd_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:206
s4 dseg_add_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:246
#define ALIGNCODENOP
Definition: codegen.hpp:47
s4 regoff
Definition: reg.hpp:47
void(* functionptr)(void)
Definition: global.hpp:39
typedesc paramtypes[1]
Definition: descriptor.hpp:167
void ubfx(u1 wd, u1 xn)
Definition: codegen.hpp:258
#define INT_SAV_CNT
Definition: md-abi.hpp:75
java_handle_t * codegen_start_native_call(u1 *sp, u1 *pv)
#define PATCHER_instanceof_interface
#define IS_2_WORD_TYPE(a)
Definition: global.hpp:132
void b_vs(s4 imm)
Definition: codegen.hpp:189
void emit_exception_check(codegendata *cd, instruction *iptr)
Definition: emit.cpp:447
void l2d(u1 dt, u1 xn)
Definition: codegen.hpp:315
void sxtb(u1 wd, u1 wn)
Definition: codegen.hpp:255
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
void ilsl_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:235
BeginInst *& block
#define COND_GT
Definition: emit-asm.hpp:62
classref_or_classinfo c
void ladd_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:207
void fcmp(u1 sn, u1 sm)
Definition: codegen.hpp:296
void cset(u1 xt, u1 cond)
Definition: codegen.hpp:282
u1 * stubroutine
Definition: method.hpp:102
s4 vftblindex
Definition: method.hpp:81
void ilsr(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:247
#define COND_EQ
Definition: emit-asm.hpp:50
void llsr(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:248
void icsneg(u1 wd, u1 wn, u1 wm, u1 cond)
Definition: codegen.hpp:286
void dneg(u1 dd, u1 dn)
Definition: codegen.hpp:294
dst_operand_t dst
void ldrsb32(u1 wt, u1 xn, s4 imm)
Definition: codegen.hpp:117
flags_operand_t flags
void dld(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:112
void f2i(u1 wd, u1 sn)
Definition: codegen.hpp:317
void imul(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:227
constant_FMIref * fieldref
Definition: resolve.hpp:88
int32_t offset
Definition: field.hpp:66
classinfo * clazz
Definition: method.hpp:80
void emit_label_br(codegendata *cd, s4 label)
#define OFFSET(s, el)
Definition: memory.hpp:90
#define BRANCH_LABEL_3
Definition: emit-common.hpp:49
void ldrsh32(u1 wt, u1 xn, s4 imm)
Definition: codegen.hpp:118
s4 emit_load_s2(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:82
void b_le(s4 imm)
Definition: codegen.hpp:196
void ilsr_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:238
void l2f(u1 st, u1 xn)
Definition: codegen.hpp:313
void lsub_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:218
void imsub(u1 wd, u1 wn, u1 wm, u1 wa)
Definition: codegen.hpp:231
s4 flags
Definition: class.hpp:90
typedesc * fd
Definition: references.hpp:74
s4 * local_map
Definition: jit.hpp:153
void emit_trap(codegendata *cd, u1 Xd, int type)
Definition: emit-asm.hpp:582
#define REG_FTMP2
Definition: md-abi.hpp:68
MIIterator i
s4 emit_load(jitdata *jd, instruction *iptr, varinfo *src, s4 tempreg)
Definition: emit.cpp:66
#define BRANCH_UGT
typedesc returntype
Definition: descriptor.hpp:166
#define BRANCH_LABEL_4
Definition: emit-common.hpp:50
int32_t s4
Definition: types.hpp:45
#define BRANCH_LABEL_9
Definition: emit-common.hpp:55
s4 dseg_add_unique_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:229
int * savfltregs
Definition: reg.hpp:73
registerdata * rd
Definition: jit.hpp:130
void isub_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:217
s4 index
Definition: class.hpp:116
void fsub(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:309
union instruction::@12 sx
void fmul(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:300
#define REG_RA
Definition: md-abi.hpp:41
void emit_arithmetic_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:346
static void abort()
Definition: os.hpp:196
#define PATCHER_checkcast_interface
void ldiv(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:225
int savfltreguse
Definition: reg.hpp:91
bool inmemory
Definition: descriptor.hpp:151
void ldrh(u1 wt, u1 xn, s4 imm)
Definition: codegen.hpp:114
void iadd(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:209
icmdtable_entry_t icmd_table[256]
Definition: icmd.cpp:60
#define REG_ITMP2
Definition: md-abi.hpp:47
void emit_icmp_imm(codegendata *cd, int reg, int32_t value)
Emits code comparing a single register.
Definition: emit.cpp:243
void emit_store_dst(jitdata *jd, instruction *iptr, s4 d)
void lor(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:271
s1_operand_t s1
uint32_t u4
Definition: types.hpp:46
basicblock * block
Definition: instruction.hpp:50
#define PATCHER_invokestatic_special
#define FLT_SAV_CNT
Definition: md-abi.hpp:82
#define BRANCH_LABEL_1
Definition: emit-common.hpp:47
vftbl_t * vftbl
Definition: class.hpp:121
void ild(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:107
void lda(u1 xd, u1 xn, s4 imm)
Definition: codegen.hpp:177
methoddesc * parseddesc
Definition: method.hpp:78
#define COND_NE
Definition: emit-asm.hpp:51
#define REG_FTMP1
Definition: md-abi.hpp:67
#define PATCHER_invokevirtual
Definition: builtin.hpp:60
void f2l(u1 xd, u1 sn)
Definition: codegen.hpp:318
methodinfo * m
Definition: jit.hpp:127
void imov(u1 wd, u1 wn)
Definition: codegen.hpp:92
void fadd(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:306
void dsub(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:310
static bool IS_INMEMORY(s4 flags)
Definition: stack.hpp:51
s4 type
Definition: field.hpp:60
void codegen_emit_epilog(jitdata *jd)
Generates machine code for the method epilog.
Definition: codegen.cpp:174
void iasr_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:241
void nop()
Definition: codegen.hpp:104
#define s3
Definition: md-asm.hpp:71
#define BRANCH_LABEL_8
Definition: emit-common.hpp:54
s4 flags
Definition: reg.hpp:45
void mov(u1 xt, u1 xn)
Definition: codegen.hpp:93
void iconst(u1 xt, s4 value)
Definition: codegen.hpp:135
void emit_classcast_check(codegendata *cd, instruction *iptr, s4 condition, s4 reg, s4 s1)
Definition: emit.cpp:396
#define REG_METHODPTR
Definition: md-abi.hpp:43
#define COND_HI
Definition: emit-asm.hpp:58
int8_t s1
Definition: types.hpp:39
void lasr_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:242
int16_t s2
Definition: types.hpp:42
#define INSTRUCTION_IS_UNRESOLVED(iptr)
void isub(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:220
void iasr(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:244
struct instruction::@12::@13 s23
#define REG_ZERO
Definition: md-abi.hpp:56
void codegen_emit_prolog(jitdata *jd)
Generates machine code for the method prolog.
Definition: codegen.cpp:73
void lld(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:108
#define REG_FTMP3
Definition: md-abi.hpp:69
void ltst(u1 xn, u1 xm)
Definition: codegen.hpp:260
void clr(u1 xd)
Definition: codegen.hpp:276
const parseddesc_t parseddesc
Definition: references.hpp:105
void ret()
Definition: codegen.hpp:200
#define REG_ITMP3
Definition: md-abi.hpp:48
#define PATCHER_resolve_classref_to_classinfo
#define MCODECHECK(icnt)
Definition: codegen.hpp:40
void iand(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:267
void lsub(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:221
#define BRANCH_LABEL_2
Definition: emit-common.hpp:48
functionptr fp
Definition: builtin.hpp:63
void emit_label(codegendata *cd, s4 label)
void lcmp(u1 xn, u1 xm)
Definition: codegen.hpp:101
s4 flags
Definition: method.hpp:70
void br(u1 xn)
Definition: codegen.hpp:199
void lasr(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:245
void emit_arrayindexoutofbounds_check(codegendata *cd, instruction *iptr, s4 s1, s4 s2)
Definition: emit.cpp:362
void icsel(u1 wt, u1 wn, u1 wm, u1 cond)
Definition: codegen.hpp:280
#define abs(x)
Definition: codegen.hpp:378
#define COND_PL
Definition: emit-asm.hpp:55
void d2i(u1 wd, u1 dn)
Definition: codegen.hpp:319
uint32_t regoff
Definition: descriptor.hpp:153
s4 dseg_add_float(codegendata *cd, float value)
Definition: dseg.cpp:392
branch_target_t * table
#define REG_RESULT
Definition: md-abi.hpp:33
void fst(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:124
void sxth(u1 wd, u1 wn)
Definition: codegen.hpp:256
void ddiv(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:304
void d2f(u1 sd, u1 dn)
Definition: codegen.hpp:323
void emit_nullpointer_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:431
#define REG_ITMP1
Definition: md-abi.hpp:46
static VM * get_current()
Definition: vm.hpp:99
void dst(u1 xt, u1 xn, s4 imm)
Definition: codegen.hpp:125