CACAO
Aarch64Backend.cpp
Go to the documentation of this file.
1 /* src/vm/jit/compiler2/aarch64/Aarch64Backend.cpp
2 
3  Copyright (C) 2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
37 #include "vm/jit/PatcherNew.hpp"
38 #include "vm/jit/jit.hpp"
39 #include "vm/jit/code.hpp"
40 #include "vm/class.hpp"
41 #include "vm/field.hpp"
42 
43 #include "md-trap.hpp"
44 
45 #include "toolbox/OStream.hpp"
46 #include "toolbox/logging.hpp"
47 
48 #define DEBUG_NAME "compiler2/aarch64"
49 
50 namespace cacao {
51 namespace jit {
52 namespace compiler2 {
53 
54 // BackendBase must be specialized in namespace compiler2!
55 using namespace aarch64;
56 
57 template<>
58 const char* BackendBase<Aarch64>::get_name() const {
59  return "aarch64";
60 }
61 
62 namespace {
63 template <class I,class Seg>
64 static void write_data(Seg seg, I data) {
65  assert(seg.size() == sizeof(I));
66 
67  for (int i = 0, e = sizeof(I) ; i < e ; ++i) {
68  seg[i] = (u1) 0xff & *(reinterpret_cast<u1*>(&data) + i);
69  }
70 }
71 
72 inline bool is_floatingpoint(Type::TypeID type) {
73  return type == Type::DoubleTypeID || type == Type::FloatTypeID;
74 }
75 
76 }
77 
template<>
// Create a machine instruction that moves `src` into `dst`.
// NOTE(review): the opening line of this signature (return type, qualified
// name of BackendBase<Aarch64>::create_Move and the `src` parameter) is not
// visible in this excerpt.
	MachineOperand* dst) const {
	Type::TypeID type = dst->get_type();

	// Source and destination must agree on type; a stack-to-stack move has
	// no single-instruction encoding and is rejected here.
	assert(type == src->get_type());
	assert(!(src->is_stackslot() && dst->is_stackslot()));

	if (src->is_Immediate()) {
		switch (type) {
		case Type::CharTypeID:
		case Type::ByteTypeID:
		case Type::ShortTypeID:
		case Type::IntTypeID:
			// Sub-int immediates are materialized as 32-bit moves.
			return new MovImmInst(DstOp(dst), SrcOp(src), Type::IntTypeID);
		case Type::LongTypeID:
		// NOTE(review): one case label (elided from this excerpt) may
		// belong here — confirm against the full source.
			return new MovImmInst(DstOp(dst), SrcOp(src), Type::LongTypeID);
		case Type::FloatTypeID:
		{
			// TODO: check if this is the correct way of using the DSEG
			// Float immediates are placed in the data segment and the
			// address is materialized via DsegAddrInst.
			float imm = src->to_Immediate()->get_Float();
			DataSegment &ds = get_JITData()
			// NOTE(review): the continuation of the expression above
			// (presumably ->get_CodeMemory()->get_DataSegment();) is not
			// visible in this excerpt.
			DataFragment data = ds.get_Ref(sizeof(float));
			DataSegment::IdxTy idx = ds.insert_tag(DSFloat(imm), data);
			write_data<float>(data, imm);
			MachineInstruction *dseg =
				new DsegAddrInst(DstOp(dst), idx, Type::FloatTypeID);
			return dseg;
		}
		case Type::DoubleTypeID:
		{
			// TODO: check if this is the correct way of using the DSEG
			// Same data-segment strategy as the float case above.
			double imm = src->to_Immediate()->get_Double();
			DataSegment &ds = get_JITData()
			// NOTE(review): continuation of this expression is not visible
			// in this excerpt.
			DataFragment data = ds.get_Ref(sizeof(double));
			DataSegment::IdxTy idx = ds.insert_tag(DSDouble(imm), data);
			write_data<double>(data, imm);
			MachineInstruction *dseg =
				new DsegAddrInst(DstOp(dst), idx, Type::DoubleTypeID);
			return dseg;
		}
		default:
			break;
		}
	}

	// Register-to-register moves: integral Mov or floating-point FMov.
	if (src->is_Register() && dst->is_Register()) {
		switch (type) {
		case Type::CharTypeID:
		case Type::ByteTypeID:
		case Type::ShortTypeID:
		case Type::IntTypeID:
		case Type::LongTypeID:
		// NOTE(review): one case label (elided from this excerpt,
		// presumably Type::ReferenceTypeID) may belong here.
			return new MovInst(DstOp(dst), SrcOp(src), type);

		case Type::FloatTypeID:
		case Type::DoubleTypeID:
			return new FMovInst(DstOp(dst), SrcOp(src), type);

		default:
			break;
		}
	}

	// Register <-> stack-slot moves become store/load instructions.
	if (src->is_Register() && (dst->is_StackSlot() || dst->is_ManagedStackSlot())) {
		return new aarch64::StoreInst(SrcOp(src), DstOp(dst), type);
	}

	if (dst->is_Register() && (src->is_StackSlot() || src->is_ManagedStackSlot())) {
		return new aarch64::LoadInst(DstOp(dst), SrcOp(src), type);
	}

	ABORT_MSG("aarch64: Move not implemented",
		"Inst: " << src << " -> " << dst << " type: " << type);
	return NULL;
}
158 
template<>
// Create an unconditional jump to `target`.
// NOTE(review): the signature line of BackendBase<Aarch64>::create_Jump
// (declaring the `target` parameter) is not visible in this excerpt.
	return new JumpInst(target);
}
163 
namespace {

/// Round @p val up to the next multiple of the template parameter @p size.
/// Already-aligned values are returned unchanged.
template <unsigned size, class T>
inline T align_to(T val) {
	T remainder = val % size;
	if (remainder == 0)
		return val;
	return val + (size - remainder);
}

} // end anonymous namespace
173 
174 
template<>
// Emit the method prologue (stack frame setup) into the code memory.
// NOTE(review): the first line of this signature (return type, qualified
// name of BackendBase<Aarch64>::create_frame and the CodeMemory* parameter
// `cm`) is not visible in this excerpt.
	StackSlotManager* ssm) const {
	// Stackframe size + FP + LR
	// Adds 16 bytes for the saved FP/LR pair and rounds up to a multiple of
	// 16 — presumably to keep SP 16-byte aligned as AArch64 requires;
	// confirm against EnterInst.
	int frameSize = align_to<16>(ssm->get_frame_size() + 16);
	EnterInst enter(frameSize);
	enter.emit(cm);
}
183 
void Aarch64LoweringVisitor::visit(LOADInst *I, bool copyOperands) {
	// Lower a method-parameter load: parameters arrive either in registers
	// (moved into a fresh virtual register) or on the caller's stack
	// (loaded from the argument slot).
	assert(I);

	// NOTE(review): the declaration of `md` (the method descriptor the
	// mapping is built from) is elided from this excerpt.
	const MachineMethodDescriptor mmd(md);

	Type::TypeID type = I->get_type();
	VirtualRegister *dst = new VirtualRegister(type);
	MachineInstruction *move = NULL;

	MachineOperand *src_op = mmd[I->get_index()];
	if (src_op->is_Register()) {
		move = get_Backend()->create_Move(src_op, dst);
	} else {
		switch (type) {
		case Type::ByteTypeID:
		case Type::ShortTypeID:
		case Type::IntTypeID:
			// Sub-int stack arguments are loaded as 32-bit values.
			move = new LoadInst(DstOp(dst), SrcOp(src_op), Type::IntTypeID);
			break;
		case Type::LongTypeID:
		// NOTE(review): one case label (elided from this excerpt,
		// presumably Type::FloatTypeID or ReferenceTypeID) belongs here.
		case Type::DoubleTypeID:
			move = new LoadInst(DstOp(dst), SrcOp(src_op), type);
			break;
		default:
			ABORT_MSG("aarch64 type not supported: ", I << " type: " << type);
		}
	}
	get_current()->push_back(move);
	set_op(I,move->get_result().op);
}
216 
void Aarch64LoweringVisitor::visit(CMPInst *I, bool copyOperands) {
	// Lower the Java fcmpl/fcmpg style three-way floating-point compare:
	// FCmp sets the flags, then conditional selects produce -1/0/1 with the
	// required NaN handling (L: NaN -> -1, G: NaN -> 1).
	// NOTE(review): several lines are elided from this excerpt — the
	// declarations of `dst`, `tmp1`, `tmp2` and the opening
	// `get_current()->push_back(` of the statements below.
	assert(I);
	MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
	MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
	Type::TypeID type = I->get_operand(0)->get_type();
	assert(type == I->get_operand(1)->get_type());

	// Only floating-point compares are lowered here.
	if (!is_floatingpoint(type)) {
		ABORT_MSG("aarch64: Lowering not supported",
			"Inst: " << I << " type: " << type);
	}

	new FCmpInst(SrcOp(src_op1), SrcOp(src_op2), type));

	// Materialize the three candidate results 0, 1 and -1.
	new MovImmInst(DstOp(dst), SrcOp(new Immediate(0, Type::IntType())),
		Type::IntTypeID));
	new MovImmInst(DstOp(tmp1), SrcOp(new Immediate(1, Type::IntType())),
		Type::IntTypeID));
	new MovImmInst(DstOp(tmp2), SrcOp(new Immediate(-1, Type::IntType())),
		Type::IntTypeID));

	MachineInstruction *csel = NULL;
	if (I->get_FloatHandling() == CMPInst::L) {
		/* set to -1 if less than or unordered (NaN) */
		/* set to 1 if greater than */
		// NOTE(review): the trailing condition argument of this CSelInst
		// is elided from this excerpt.
		csel = new CSelInst(DstOp(tmp1), SrcOp(tmp1), SrcOp(tmp2),
	} else if (I->get_FloatHandling() == CMPInst::G) {
		/* set to 1 if greater than or unordered (NaN) */
		/* set to -1 if less than */
		// NOTE(review): trailing condition argument elided here as well.
		csel = new CSelInst(DstOp(tmp1), SrcOp(tmp1), SrcOp(tmp2),
	} else {
		assert(0);
	}
	get_current()->push_back(csel);

	/* set to 0 if equal or result of previous csel */
	new CSelInst(DstOp(dst), SrcOp(dst), SrcOp(tmp1),
	set_op(I, dst);
}
268 
269 void Aarch64LoweringVisitor::visit(IFInst *I, bool copyOperands) {
270  assert(I);
271  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
272  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
273  Type::TypeID type = I->get_type();
274  MachineInstruction* cmp;
275 
276  switch (type) {
277  case Type::ByteTypeID:
278  case Type::IntTypeID:
279  case Type::LongTypeID:
280  {
281  cmp = new CmpInst(SrcOp(src_op1), SrcOp(src_op2), type);
282 
283  MachineInstruction* cjmp = NULL;
284  BeginInstRef& then = I->get_then_target();
285  BeginInstRef& els = I->get_else_target();
286 
287  switch (I->get_condition()) {
288  case Conditional::EQ:
289  cjmp = new CondJumpInst(Cond::EQ, get(then.get()),get(els.get()));
290  break;
291  case Conditional::LT:
292  cjmp = new CondJumpInst(Cond::LT, get(then.get()),get(els.get()));
293  break;
294  case Conditional::LE:
295  cjmp = new CondJumpInst(Cond::LE, get(then.get()),get(els.get()));
296  break;
297  case Conditional::GE:
298  cjmp = new CondJumpInst(Cond::GE, get(then.get()),get(els.get()));
299  break;
300  case Conditional::GT:
301  cjmp = new CondJumpInst(Cond::GT, get(then.get()),get(els.get()));
302  break;
303  case Conditional::NE:
304  cjmp = new CondJumpInst(Cond::NE, get(then.get()),get(els.get()));
305  break;
306  default:
307  ABORT_MSG("aarch64 Conditional not supported: ", I << "cond: "
308  << I->get_condition());
309  }
310  get_current()->push_back(cmp);
311  get_current()->push_back(cjmp);
312 
313  set_op(I, cjmp->get_result().op);
314  return;
315  }
316  default:
317  break;
318  }
319  ABORT_MSG("aarch64: Lowering not supported",
320  "Inst: " << I << " type: " << type);
321 }
322 
void Aarch64LoweringVisitor::visit(NEGInst *I, bool copyOperands) {
	// Arithmetic negation: Neg for integral types, FNeg for floating point.
	assert(I);
	Type::TypeID type = I->get_type();
	// NOTE(review): the declaration of `src` (the lowered operand of I) is
	// elided from this excerpt.
	VirtualRegister* dst = new VirtualRegister(type);

	MachineInstruction* neg;

	switch (type) {
	case Type::IntTypeID:
	case Type::LongTypeID:
		neg = new NegInst(DstOp(dst), SrcOp(src), type);
		break;

	case Type::FloatTypeID:
	case Type::DoubleTypeID:
		neg = new FNegInst(DstOp(dst), SrcOp(src), type);
		break;

	default:
		ABORT_MSG("aarch64: Lowering not supported",
			"Inst: " << I << " type: " << type);
	}
	get_current()->push_back(neg);
	set_op(I, neg->get_result().op);
}
349 
350 void Aarch64LoweringVisitor::visit(ADDInst *I, bool copyOperands) {
351  assert(I);
352  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
353  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
354  assert(src_op1->is_Register() && src_op2->is_Register());
355 
356  Type::TypeID type = I->get_type();
357  VirtualRegister* dst = new VirtualRegister(type);
358  MachineInstruction* inst;
359 
360  switch (type) {
361  case Type::IntTypeID:
362  case Type::LongTypeID:
363  inst = new AddInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
364  break;
365 
366  case Type::FloatTypeID:
367  case Type::DoubleTypeID:
368  inst = new FAddInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
369  break;
370 
371  default:
372  ABORT_MSG("aarch64: Lowering not supported",
373  "Inst: " << I << " type: " << type);
374  }
375 
376  get_current()->push_back(inst);
377  set_op(I, inst->get_result().op);
378 }
379 
380 void Aarch64LoweringVisitor::visit(ANDInst *I, bool copyOperands) {
381  assert(I);
382  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
383  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
384  Type::TypeID type = I->get_type();
385  VirtualRegister* dst = new VirtualRegister(type);
386  MachineInstruction* inst;
387 
388  switch (type) {
389  case Type::IntTypeID:
390  case Type::LongTypeID:
391  inst = new AndInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
392  break;
393 
394  default:
395  ABORT_MSG("aarch64: Lowering not supported",
396  "Inst: " << I << " type: " << type);
397  }
398 
399  get_current()->push_back(inst);
400  set_op(I, inst->get_result().op);
401 }
402 
403 void Aarch64LoweringVisitor::visit(SUBInst *I, bool copyOperands) {
404  assert(I);
405  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
406  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
407  Type::TypeID type = I->get_type();
408  VirtualRegister* dst = new VirtualRegister(type);
409  MachineInstruction* inst;
410 
411  switch (type) {
412  case Type::IntTypeID:
413  case Type::LongTypeID:
414  inst = new SubInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
415  break;
416 
417  case Type::FloatTypeID:
418  case Type::DoubleTypeID:
419  inst = new FSubInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
420  break;
421 
422  default:
423  ABORT_MSG("aarch64: Lowering not supported",
424  "Inst: " << I << " type: " << type);
425  }
426 
427  get_current()->push_back(inst);
428  set_op(I, inst->get_result().op);
429 }
430 
431 void Aarch64LoweringVisitor::visit(MULInst *I, bool copyOperands) {
432  assert(I);
433  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
434  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
435  Type::TypeID type = I->get_type();
436  VirtualRegister* dst = new VirtualRegister(type);
437  MachineInstruction* inst;
438 
439  switch (type) {
440  case Type::IntTypeID:
441  case Type::LongTypeID:
442  inst = new MulInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
443  break;
444 
445  case Type::FloatTypeID:
446  case Type::DoubleTypeID:
447  inst = new FMulInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
448  break;
449 
450  default:
451  ABORT_MSG("aarch64: Lowering not supported",
452  "Inst: " << I << " type: " << type);
453  }
454 
455  get_current()->push_back(inst);
456  set_op(I, inst->get_result().op);
457 }
458 
459 void Aarch64LoweringVisitor::visit(DIVInst *I, bool copyOperands) {
460  assert(I);
461  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
462  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
463  Type::TypeID type = I->get_type();
464  VirtualRegister* dst = new VirtualRegister(type);
465  MachineInstruction* inst;
466 
467  switch (type) {
468  case Type::IntTypeID:
469  case Type::LongTypeID:
470  inst = new DivInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
471  break;
472 
473  case Type::FloatTypeID:
474  case Type::DoubleTypeID:
475  inst = new FDivInst(DstOp(dst), SrcOp(src_op1), SrcOp(src_op2), type);
476  break;
477 
478  default:
479  ABORT_MSG("aarch64: Lowering not supported",
480  "Inst: " << I << " type: " << type);
481  }
482 
483  get_current()->push_back(inst);
484  set_op(I, inst->get_result().op);
485 }
486 
void Aarch64LoweringVisitor::visit(REMInst *I, bool copyOperands) {
	// Integer remainder: aarch64 has no rem instruction, so compute
	// tmp = dividend / divisor and then dst = dividend - tmp * divisor
	// via a multiply-subtract.
	assert(I);

	MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
	Type::TypeID type = I->get_type();
	MachineOperand *dividend = get_op(I->get_operand(0)->to_Instruction());
	// NOTE(review): the declaration of `tmp` (the quotient vreg) is elided
	// from this excerpt.
	MachineOperand *dst = new VirtualRegister(I->get_type());
	MachineInstruction *div;
	MachineInstruction *msub;

	switch (type) {
	case Type::IntTypeID:
		div = new DivInst(DstOp(tmp), SrcOp(dividend), SrcOp(src_op2), type);
		msub = new MulSubInst(DstOp(dst), SrcOp(tmp), SrcOp(src_op2),
			SrcOp(dividend), type);
		get_current()->push_back(div);
		get_current()->push_back(msub);
		set_op(I, msub->get_result().op);
		break;

	default:
		ABORT_MSG("aarch64: Lowering not supported",
			"Inst: " << I << " type: " << type);
	}
}
513 
void Aarch64LoweringVisitor::visit(ALOADInst *I, bool copyOperands) {
	// Array element load: base = ref + (index << shift), then load from
	// base + header offset of the element data.
	assert(I);
	Instruction* ref_inst = I->get_operand(0)->to_Instruction();

	MachineOperand* src_ref = get_op(ref_inst->get_operand(0)->to_Instruction());
	MachineOperand* src_index = get_op(ref_inst->get_operand(1)->to_Instruction());
	assert(src_ref->get_type() == Type::ReferenceTypeID);
	assert(src_index->get_type() == Type::IntTypeID);

	Type::TypeID type = I->get_type();
	MachineOperand *vreg = new VirtualRegister(type);
	//MachineOperand *base = new VirtualRegister(Type::LongTypeID);
	// NOTE(review): the live declaration of `base` is elided from this
	// excerpt.
	Immediate *imm;

	// offset = start of the element data inside the array object;
	// shift = log2(element size), applied to the index via LSL.
	s4 offset = 0;
	u1 shift = 0;
	switch (type) {
	case Type::ByteTypeID:
		offset = OFFSET(java_bytearray_t, data[0]);
		break;
	case Type::ShortTypeID:
		offset = OFFSET(java_shortarray_t, data[0]);
		shift = 1;
		break;
	case Type::CharTypeID:
		offset = OFFSET(java_chararray_t, data[0]);
		shift = 1;
		break;
	case Type::IntTypeID:
		offset = OFFSET(java_intarray_t, data[0]);
		shift = 2;
		break;
	case Type::LongTypeID:
		offset = OFFSET(java_longarray_t, data[0]);
		shift = 3;
		break;
	// NOTE(review): the case label for the object-array variant
	// (presumably Type::ReferenceTypeID) is elided from this excerpt.
		offset = OFFSET(java_objectarray_t, data[0]);
		shift = 3;
		break;
	case Type::FloatTypeID:
		offset = OFFSET(java_floatarray_t, data[0]);
		shift = 2;
		break;
	case Type::DoubleTypeID:
		offset = OFFSET(java_doublearray_t, data[0]);
		shift = 3;
		break;
	default:
		ABORT_MSG("aarch64 Lowering not supported",
			"Inst: " << I << " type: " << type);
	}
	imm = new Immediate(offset, Type::IntType());

	// NOTE(review): the opening `get_current()->push_back(` lines of the
	// two statements below are elided from this excerpt.
	new AddInst(DstOp(base), SrcOp(src_ref), SrcOp(src_index),
		Type::LongTypeID, Shift::LSL, shift));
	new LoadInst(DstOp(vreg), BaseOp(base), IdxOp(imm), type));
	set_op(I, vreg);
}
576 
void Aarch64LoweringVisitor::visit(ASTOREInst *I, bool copyOperands) {
	// Array element store: mirrors the ALOAD lowering above but emits a
	// StoreInst of the value operand.
	assert(I);
	Instruction* ref_inst = I->get_operand(0)->to_Instruction();

	MachineOperand* src_ref = get_op(ref_inst->get_operand(0)->to_Instruction());
	MachineOperand* src_index = get_op(ref_inst->get_operand(1)->to_Instruction());
	MachineOperand* src_value = get_op(I->get_operand(1)->to_Instruction());
	assert(src_ref->get_type() == Type::ReferenceTypeID);
	assert(src_index->get_type() == Type::IntTypeID);

	//MachineOperand *base = new VirtualRegister(Type::LongTypeID);
	// NOTE(review): the live declaration of `base` is elided from this
	// excerpt.
	Immediate *imm;
	Type::TypeID type = src_value->get_type();

	// offset = start of the element data inside the array object;
	// shift = log2(element size), applied to the index via LSL.
	s4 offset = 0;
	u1 shift = 0;
	switch (type) {
	case Type::ByteTypeID:
		offset = OFFSET(java_bytearray_t, data[0]);
		break;
	case Type::ShortTypeID:
		offset = OFFSET(java_shortarray_t, data[0]);
		shift = 1;
		break;
	case Type::CharTypeID:
		offset = OFFSET(java_chararray_t, data[0]);
		shift = 1;
		break;
	case Type::IntTypeID:
		offset = OFFSET(java_intarray_t, data[0]);
		shift = 2;
		break;
	case Type::LongTypeID:
		offset = OFFSET(java_longarray_t, data[0]);
		shift = 3;
		break;
	// NOTE(review): the case label for the object-array variant
	// (presumably Type::ReferenceTypeID) is elided from this excerpt.
		offset = OFFSET(java_objectarray_t, data[0]);
		shift = 3;
		break;
	case Type::FloatTypeID:
		offset = OFFSET(java_floatarray_t, data[0]);
		shift = 2;
		break;
	case Type::DoubleTypeID:
		offset = OFFSET(java_doublearray_t, data[0]);
		shift = 3;
		break;
	default:
		ABORT_MSG("aarch64 Lowering not supported",
			"Inst: " << I << " type: " << type);
	}
	imm = new Immediate(offset, Type::IntType());
	// NOTE(review): the opening `get_current()->push_back(` lines of the
	// two statements below are elided from this excerpt.
	new AddInst(DstOp(base), SrcOp(src_ref), SrcOp(src_index),
		Type::LongTypeID, Shift::LSL, shift));
	new StoreInst(SrcOp(src_value), BaseOp(base), IdxOp(imm), type));
}
637 
// NOTE(review): the signature of this visit method is not visible in this
// excerpt; the body (loads an Int from an array reference) suggests it is
// the ARRAYLENGTHInst lowering — confirm against the full source. The
// declarations of `vreg` and `imm` (the length-field offset) are elided.
	assert(I);

	// Implicit null-checks are handled via deoptimization.

	MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
	assert(I->get_type() == Type::IntTypeID);
	assert(src_op->get_type() == Type::ReferenceTypeID);

	MachineInstruction *load =
		new LoadInst(DstOp(vreg), BaseOp(src_op), IdxOp(imm), Type::IntTypeID);
	get_current()->push_back(load);
	set_op(I, load->get_result().op);
}
655 
// NOTE(review): the signature of this visit method is not visible in this
// excerpt; the body implements an array bounds check (presumably
// ARRAYBOUNDSCHECKInst) — confirm against the full source. The declarations
// of `len`, `imm` and `trap` are elided as well.
	assert(I);
	MachineOperand* src_ref = get_op(I->get_operand(0)->to_Instruction());
	MachineOperand* src_index = get_op(I->get_operand(1)->to_Instruction());
	assert(src_ref->get_type() == Type::ReferenceTypeID);
	assert(src_index->get_type() == Type::IntTypeID);

	// Implicit null-checks are handled via deoptimization.

	// load array length

	MachineInstruction *load =
		new LoadInst(DstOp(len), BaseOp(src_ref), IdxOp(imm), Type::IntTypeID);
	get_current()->push_back(load);

	// compare with index
	CmpInst *cmp = new CmpInst(SrcOp(src_index), SrcOp(len), Type::IntTypeID);
	get_current()->push_back(cmp);

	// throw exception if index is out of bounds
	get_current()->push_back(trap);
}
682 
void Aarch64LoweringVisitor::visit(RETURNInst *I, bool copyOperands) {
	// Move the return value into the ABI return register (R0 for integral
	// values, V0 for floating point), emit the epilogue (Leave) and Ret.
	assert(I);
	Type::TypeID type = I->get_type();
	MachineOperand* src_op = type == Type::VoidTypeID ?
		0 : get_op(I->get_operand(0)->to_Instruction());

	MachineInstruction *mov = NULL;
	LeaveInst *leave = new LeaveInst();

	RetInst *ret = NULL;

	switch (type) {
	case Type::CharTypeID:
	case Type::ByteTypeID:
	case Type::ShortTypeID:
	case Type::IntTypeID:
	case Type::LongTypeID:
	// NOTE(review): one case label (elided from this excerpt, presumably
	// Type::ReferenceTypeID) belongs here.
	{
		MachineOperand *ret_reg = new NativeRegister(type, &R0);
		mov = new MovInst(DstOp(ret_reg), SrcOp(src_op), type);
		break;
	}
	case Type::FloatTypeID:
	case Type::DoubleTypeID:
	{
		MachineOperand *ret_reg = new NativeRegister(type, &V0);
		mov = new FMovInst(DstOp(ret_reg), SrcOp(src_op), type);
		break;
	}
	case Type::VoidTypeID:
		break;

	default:
		ABORT_MSG("aarch64: Lowering not supported",
			"Inst: " << I << " type: " << type);
	}


	if (type != Type::VoidTypeID) {
		get_current()->push_back(mov);
		set_op(I, mov->get_result().op);
		ret = new RetInst(SrcOp(mov->get_result().op));
	} else {
		ret = new RetInst();
	}
	get_current()->push_back(leave);
	get_current()->push_back(ret);
}
732 
void Aarch64LoweringVisitor::visit(CASTInst *I, bool copyOperands) {
	// Primitive conversion: dispatch on (from, to) type pair and emit the
	// matching conversion instruction; every handled pair returns early.
	// NOTE(review): the declaration of `from` (the operand's TypeID) is
	// elided from this excerpt. Also note for review: the outer cases for
	// IntTypeID, DoubleTypeID and FloatTypeID end without a `break`, so
	// unhandled (from, to) pairs fall through into the next outer case's
	// inner switch before reaching the final ABORT_MSG — confirm this is
	// benign in the full source.
	MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
	Type::TypeID to = I->get_type();

	switch (from) {
	case Type::LongTypeID:
	{
		switch (to) {
		case Type::IntTypeID:
		{
			MachineInstruction *conv =
				new LongToIntInst(DstOp(new VirtualRegister(to)),
					SrcOp(src_op));
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		case Type::DoubleTypeID:
		{
			MachineInstruction *conv = new IntToFpInst(
				DstOp(new VirtualRegister(to)), SrcOp(src_op), to, from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		default:
			break;
		}
		break;
	}
	case Type::IntTypeID:
	{
		switch (to) {
		case Type::ByteTypeID:
		{
			// NOTE(review): the opening of this construction (presumably
			// `new IntToByteInst(DstOp(new VirtualRegister(to)),`) is
			// elided from this excerpt.
			MachineInstruction *conv =
				SrcOp(src_op), from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		case Type::ShortTypeID:
		{
			// NOTE(review): the opening of this construction (presumably
			// `new IntToShortInst(DstOp(new VirtualRegister(to)),`) is
			// elided from this excerpt.
			MachineInstruction *conv =
				SrcOp(src_op), from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		case Type::CharTypeID:
		{
			MachineInstruction *conv =
				new IntToCharInst(DstOp(new VirtualRegister(to)), SrcOp(src_op));
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		case Type::LongTypeID:
		{
			MachineInstruction *conv =
				new IntToLongInst(DstOp(new VirtualRegister(to)), SrcOp(src_op));
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		case Type::FloatTypeID:
		case Type::DoubleTypeID:
		{
			MachineInstruction *conv =
				new IntToFpInst(DstOp(new VirtualRegister(to)), SrcOp(src_op),
					to, from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		default:
			break;
			// NOTE(review): the second `break` below is unreachable.
			break;
		}
	}
	case Type::DoubleTypeID:
	{
		switch (to) {
		case Type::FloatTypeID:
		{
			MachineInstruction *conv =
				new FcvtInst(DstOp(new VirtualRegister(to)), SrcOp(src_op),
					to, from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		default:
			break;
			// NOTE(review): the second `break` below is unreachable.
			break;
		}
	}
	case Type::FloatTypeID:
	{
		switch (to) {
		case Type::DoubleTypeID:
		{
			MachineInstruction *conv =
				new FcvtInst(DstOp(new VirtualRegister(to)), SrcOp(src_op),
					to, from);
			get_current()->push_back(conv);
			set_op(I, conv->get_result().op);
			return;
		}
		default:
			break;
			// NOTE(review): the second `break` below is unreachable.
			break;
		}
	}
	default:
		break;
	}
	ABORT_MSG("aarch64: Cast not supported!", "From " << from << " to " << to );
}
855 
void Aarch64LoweringVisitor::visit(INVOKEInst *I, bool copyOperands) {
	// Generic call lowering shared by all INVOKE variants: pick the return
	// register, move arguments into their ABI locations, resolve the callee
	// address (data segment / vtable / interface table / builtin address),
	// attach a replacement point, then emit the call and fetch the result.
	// NOTE(review): several declarations are elided from this excerpt —
	// `MD`, `SSM`, `addr`, `MI`, `DS` and `vftbl_address` among them.
	assert(I);
	Type::TypeID type = I->get_type();
	MachineMethodDescriptor MMD(MD);

	// operands for the call
	MachineOperand *result = &NoOperand;

	// get return value
	switch (type) {
	case Type::IntTypeID:
	case Type::LongTypeID:
	// NOTE(review): one case label (elided from this excerpt, presumably
	// Type::ReferenceTypeID) belongs here.
		result = new NativeRegister(type,&R0);
		break;
	case Type::FloatTypeID:
	case Type::DoubleTypeID:
		result = new NativeRegister(type,&V0);
		break;
	case Type::VoidTypeID:
		break;
	default:
		// NOTE(review): "x86_64" in this message looks like a copy-paste
		// leftover from the x86_64 backend — should read "aarch64".
		ABORT_MSG("x86_64 Lowering not supported",
			"Inst: " << I << " type: " << type);
	}

	// create call
	MachineInstruction* call = new CallInst(SrcOp(addr),DstOp(result),I->op_size());
	// move values to parameters
	int arg_counter = 0;
	for (std::size_t i = 0; i < I->op_size(); ++i ) {
		MachineOperand *arg_dst = MMD[i];
		if (arg_dst->is_StackSlot()) {
			// Arguments beyond the register set go to outgoing stack slots.
			arg_dst = SSM->create_argument_slot(arg_dst->get_type(), arg_counter++);
		}

		// NOTE(review): the opening of this statement (presumably
		// `MachineInstruction* mov = get_Backend()->create_Move(...,`) is
		// elided from this excerpt.
		arg_dst
		);
		get_current()->push_back(mov);
		// set call operand
		call->set_operand(i+1,arg_dst);
	}

	// Source state for replacement point instructions
	// Concrete class depends on INVOKE type
	SourceStateInst *source_state = I->get_SourceStateInst();
	assert(source_state);

	if (I->to_INVOKESTATICInst() || I->to_INVOKESPECIALInst()) {
		// Static/special calls: the entrypoint is stored in the data
		// segment and its address is materialized via DsegAddrInst.
		DataSegment::IdxTy idx = DS.get_index(DSFMIRef(I->get_fmiref()));
		if (DataSegment::is_invalid(idx)) {
			DataFragment data = DS.get_Ref(sizeof(void*));
			idx = DS.insert_tag(DSFMIRef(I->get_fmiref()), data);
		}

		DataFragment datafrag = DS.get_Ref(idx, sizeof(void*));
		methodinfo* callee = I->get_fmiref()->p.method;
		write_data<void*>(datafrag, callee->code->entrypoint);

		DsegAddrInst* mov = new DsegAddrInst(DstOp(addr), idx, Type::ReferenceTypeID);
		get_current()->push_back(mov);

		MI = new MachineReplacementPointStaticSpecialInst(call, source_state->get_source_location(), source_state->op_size(), idx);
	} else if (I->to_INVOKEVIRTUALInst()) {
		// Implicit null-checks are handled via deoptimization.

		// Virtual dispatch: load the vftbl from the receiver, then the
		// method pointer from the vftbl slot of the callee.
		methodinfo* callee = I->get_fmiref()->p.method;
		int32_t s1 = OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * callee->vftblindex;
		MachineOperand *receiver = get_op(I->get_operand(0)->to_Instruction());
		MachineOperand *vftbl_offset = new Immediate(OFFSET(java_object_t, vftbl), Type::IntType());
		MachineInstruction *load_vftbl_address = new LoadInst(DstOp(vftbl_address), BaseOp(receiver), IdxOp(vftbl_offset), Type::LongTypeID);
		get_current()->push_back(load_vftbl_address);

		MachineOperand *method_offset = new Immediate(s1, Type::IntType());
		MachineInstruction *load_method_address = new LoadInst(DstOp(addr), BaseOp(vftbl_address), IdxOp(method_offset), Type::LongTypeID);
		get_current()->push_back(load_method_address);

		MI = new MachineReplacementPointCallSiteInst(call, source_state->get_source_location(), source_state->op_size());
	} else if (I->to_INVOKEINTERFACEInst()) {
		// Implicit null-checks are handled via deoptimization.

		// Interface dispatch: vftbl -> interface table (negative offset by
		// interface index) -> method pointer within that interface table.
		methodinfo* callee = I->get_fmiref()->p.method;
		int32_t s1 = OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * callee->clazz->index;
		MachineOperand *receiver = get_op(I->get_operand(0)->to_Instruction());

		MachineOperand *vftbl_offset = new Immediate(OFFSET(java_object_t, vftbl), Type::IntType());
		MachineInstruction *load_vftbl_address = new LoadInst(DstOp(vftbl_address), BaseOp(receiver), IdxOp(vftbl_offset), Type::LongTypeID);
		get_current()->push_back(load_vftbl_address);

		VirtualRegister *interface_address = new VirtualRegister(Type::ReferenceTypeID);
		MachineOperand *interface_offset = new Immediate(s1, Type::IntType());
		MachineInstruction *load_interface_address = new LoadInst(DstOp(interface_address), BaseOp(vftbl_address), IdxOp(interface_offset), Type::LongTypeID);
		get_current()->push_back(load_interface_address);

		int32_t s2 = sizeof(methodptr) * (callee - callee->clazz->methods);
		MachineOperand *method_offset = new Immediate(s2, Type::IntType());
		MachineInstruction *load_method_address = new LoadInst(DstOp(addr), BaseOp(interface_address), IdxOp(method_offset), Type::LongTypeID);
		get_current()->push_back(load_method_address);

		MI = new MachineReplacementPointCallSiteInst(call, source_state->get_source_location(), source_state->op_size());
	} else if (I->to_BUILTINInst()) {
		// Builtins: the native address is known at compile time and moved
		// into the address register directly.
		// NOTE(review): the second argument of this Immediate construction
		// is elided from this excerpt.
		Immediate *method_address = new Immediate(reinterpret_cast<s8>(I->to_BUILTINInst()->get_address()),
		MachineInstruction *mov = get_Backend()->create_Move(method_address, addr);
		get_current()->push_back(mov);

		MI = new MachineReplacementPointCallSiteInst(call, source_state->get_source_location(), source_state->op_size());
	}

	// add replacement point
	lower_source_state_dependencies(MI, source_state);
	get_current()->push_back(MI);

	// add call
	get_current()->push_back(call);

	// get result
	if (result != &NoOperand) {
		MachineOperand *dst = new VirtualRegister(type);
		MachineInstruction *reg = get_Backend()->create_Move(result, dst);
		get_current()->push_back(reg);
		set_op(I,reg->get_result().op);
	}
}
991 
// NOTE(review): the signatures of the following four visit overloads
// (presumably INVOKESTATICInst, INVOKESPECIALInst, INVOKEVIRTUALInst and
// INVOKEINTERFACEInst) are not visible in this excerpt; each simply
// delegates to the generic INVOKEInst lowering above.
	visit(static_cast<INVOKEInst*>(I), copyOperands);
}

	visit(static_cast<INVOKEInst*>(I), copyOperands);
}

	visit(static_cast<INVOKEInst*>(I), copyOperands);
}

	visit(static_cast<INVOKEInst*>(I), copyOperands);
}
1007 
1008 void Aarch64LoweringVisitor::visit(BUILTINInst *I, bool copyOperands) {
1009  visit(static_cast<INVOKEInst*>(I), copyOperands);
1010 }
1011 
void Aarch64LoweringVisitor::visit(GETFIELDInst *I, bool copyOperands) {
	// Instance field read: load from objectref + field offset.
	assert(I);

	// Implicit null-checks are handled via deoptimization.
	// NOTE(review): one line following this comment is elided from this
	// excerpt.

	MachineOperand* objectref = get_op(I->get_operand(0)->to_Instruction());
	MachineOperand *result = new VirtualRegister(I->get_type());
	Immediate *field_offset = new Immediate(reinterpret_cast<s4>(I->get_field()->offset),
		Type::IntType());

	MachineInstruction *read_field = new LoadInst(
		DstOp(result), BaseOp(objectref), IdxOp(field_offset), I->get_type());
	get_current()->push_back(read_field);
	set_op(I, read_field->get_result().op);
}
1028 
void Aarch64LoweringVisitor::visit(PUTFIELDInst *I, bool copyOperands) {
	// Instance field write: store to objectref + field offset.
	assert(I);

	// Implicit null-checks are handled via deoptimization.
	// NOTE(review): one line following this comment is elided from this
	// excerpt.

	MachineOperand *objectref = get_op(I->get_operand(0)->to_Instruction());
	MachineOperand *value = get_op(I->get_operand(1)->to_Instruction());
	Immediate *field_offset = new Immediate(reinterpret_cast<s4>(I->get_field()->offset),
		Type::IntType());

	MachineInstruction *write_field = new StoreInst(
		SrcOp(value), BaseOp(objectref), IdxOp(field_offset), I->get_type());
	get_current()->push_back(write_field);
	// NOTE(review): registering the result of a StoreInst as the operand
	// of I looks suspicious (stores produce no value) — verify.
	set_op(I, write_field->get_result().op);
}
1045 
void Aarch64LoweringVisitor::visit(GETSTATICInst *I, bool copyOperands) {
	// Static field read: materialize the field's absolute address, then
	// load from it with a zero offset.
	assert(I);
	// NOTE(review): the declaration of `field_address` and the second
	// argument of the Immediate construction below are elided from this
	// excerpt.
	Immediate *field_address_imm = new Immediate(reinterpret_cast<s8>(I->get_field()->value),
	MachineInstruction *mov = get_Backend()->create_Move(field_address_imm, field_address);
	get_current()->push_back(mov);

	MachineOperand *vreg = new VirtualRegister(I->get_type());
	Immediate *imm = new Immediate(0, Type::IntType());
	MachineInstruction *load = new LoadInst(
		DstOp(vreg), BaseOp(field_address), IdxOp(imm), I->get_type());

	get_current()->push_back(load);
	set_op(I, load->get_result().op);
}
1062 
void Aarch64LoweringVisitor::visit(PUTSTATICInst *I, bool copyOperands) {
	// Static field write: materialize the field's absolute address, then
	// store the value to it with a zero offset.
	// NOTE(review): one line after the signature and the second argument of
	// the Immediate construction below are elided from this excerpt.
	Immediate *field_address_imm = new Immediate(reinterpret_cast<s8>(I->get_field()->value),
	MachineInstruction *mov = get_Backend()->create_Move(field_address_imm, field_address);
	get_current()->push_back(mov);

	MachineOperand *value = get_op(I->get_operand(0)->to_Instruction());
	Immediate *imm = new Immediate(0, Type::IntType());
	MachineInstruction *write_field = new StoreInst(
		SrcOp(value), BaseOp(field_address), IdxOp(imm), I->get_type());
	get_current()->push_back(write_field);
	// NOTE(review): registering the result of a StoreInst as the operand
	// of I looks suspicious (stores produce no value) — verify.
	set_op(I, write_field->get_result().op);
}
1077 
// NOTE(review): the signature of this visit method (presumably
// LOOKUPSWITCHInst) is not visible in this excerpt, nor are the declaration
// of the successor iterator `s` and the opening of the match-loop below.
// The lowering emits a compare-and-branch chain: one Cmp + CondJump per
// match key, each miss continuing in a freshly created basic block, with a
// final unconditional jump to the default successor.
	assert(I);
	MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
	Type::TypeID type = I->get_type();

	e = I->match_end(); i != e; ++i) {
		// create compare
		Immediate *imm = new Immediate(*i, Type::IntType());
		MachineOperand *cmpOp = imm;
		// Keys outside 0..4095 are moved into a register first —
		// presumably because Cmp only encodes a 12-bit unsigned immediate;
		// confirm against CmpInst.
		if (*i < 0 || *i > 4095) {
			cmpOp = new VirtualRegister(type);
			MachineInstruction *mov = get_Backend()->create_Move(imm, cmpOp);
			get_current()->push_back(mov);
		}

		MachineInstruction *cmp = NULL;
		switch (type) {
		case Type::IntTypeID:
			cmp = new CmpInst(SrcOp(src_op), SrcOp(cmpOp), type);
			break;
		default:
			UNIMPLEMENTED_MSG("aarch64: LOOKUPSWITCHInst type: " << type);
		}
		get_current()->push_back(cmp);
		// create new block
		MachineBasicBlock *then_block = get(s->get());
		MachineBasicBlock *else_block = new_block();
		assert(else_block);
		else_block->insert_pred(get_current());
		else_block->push_front(new MachineLabelInst());
		// create cond jump
		MachineInstruction *cjmp = new CondJumpInst(Cond::EQ, then_block, else_block);
		get_current()->push_back(cjmp);
		// set current
		set_current(else_block);
		++s;
	}

	// default
	MachineInstruction *jmp = new JumpInst(get(s->get()));
	get_current()->push_back(jmp);
	assert(++s == I->succ_end());

	set_op(I,jmp->get_result().op);
}
1125 
// NOTE(review): the signature of this visit method is not visible in this
// excerpt (presumably TABLESWITCHInst); the lowering is unimplemented and
// always aborts.
	Type::TypeID type = I->get_type();
	ABORT_MSG("aarch64: Lowering not supported",
		"Inst: " << I << " type: " << type);
}
1131 
void Aarch64LoweringVisitor::visit(CHECKNULLInst *I, bool copyOperands) {
	// TODO: Implement me
	// Intentionally empty for now: no explicit null-check code is emitted
	// on aarch64 (other lowerings in this file note that implicit
	// null-checks are handled via deoptimization).
}
1135 
void Aarch64LoweringVisitor::visit(AREFInst *I, bool copyOperands) {
	// DO NOTHING
	// The address computation is folded into the AREF's users: the ALOAD
	// and ASTORE lowerings above read the reference and index operands
	// directly and combine them themselves.
}
1139 
// NOTE(review): the signature of this visit method is not visible in this
// excerpt (presumably DeoptimizeInst). It lowers to a replacement point
// carrying the source state followed by a TRAP_DEOPTIMIZE trap.
	assert(I);

	SourceStateInst *source_state = I->get_source_state();
	assert(source_state);
	// NOTE(review): the opening of the construction of `MI` (a machine
	// replacement point instruction) is elided from this excerpt.
	source_state->get_source_location(), source_state->op_size());
	lower_source_state_dependencies(MI, source_state);
	get_current()->push_back(MI);

	// NOTE(review): the declaration of `methodptr` is elided from this
	// excerpt.
	MachineInstruction *deoptimize_trap = new TrapInst(TRAP_DEOPTIMIZE, SrcOp(methodptr));
	get_current()->push_back(deoptimize_trap);
}
1154 
// NOTE(review): the signature of this method
// (Aarch64LoweringVisitor::lowerComplex, taking an Instruction* and a rule
// id) is not visible in this excerpt; complex-pattern lowering is
// unsupported on aarch64 and always aborts.
	ABORT_MSG("Rule not supported", "Rule "
		<< ruleId << " is not supported by method lowerComplex!");
}
1159 
1160 } // end namespace compiler2
1161 } // end namespace jit
1162 } // end namespace cacao
1163 
1164 
1165 /*
1166  * These are local overrides for various environment variables in Emacs.
1167  * Please do not remove this and leave it at the end of the file, where
1168  * Emacs will automagically detect them.
1169  * ---------------------------------------------------------------------
1170  * Local variables:
1171  * mode: c++
1172  * indent-tabs-mode: t
1173  * c-basic-offset: 4
1174  * tab-width: 4
1175  * End:
1176  * vim:noexpandtab:sw=4:ts=4:
1177  */
void set_op(Instruction *I, MachineOperand *op) const
Definition: Backend.hpp:106
static const COND EQ
Equal (Z == 1)
Definition: Aarch64Cond.hpp:44
void set_current(MachineBasicBlock *MBB)
Definition: Backend.hpp:122
virtual MachineInstruction * create_Move(MachineOperand *src, MachineOperand *dst) const =0
ConstTag< DataSegmentType, float, FloatID > DSFloat
Definition: DataSegment.hpp:47
virtual BUILTINInst * to_BUILTINInst()
Definition: Instruction.hpp:68
static const COND GE
Signed greater than or equal (N == V)
Definition: Aarch64Cond.hpp:54
virtual Instruction * to_Instruction()
Definition: Value.hpp:88
SourceStateInst * get_source_state() const
Get the SourceStateInst that corresponds to this Instruction.
virtual MachineInstruction * create_Jump(MachineBasicBlock *target) const
PointerTag< DataSegmentType, constant_FMIref, FMIRefID > DSFMIRef
Definition: DataSegment.hpp:49
static const COND NE
Not equal (Z == 0)
Definition: Aarch64Cond.hpp:45
const MethodDescriptor & get_MethodDescriptor() const
Get the MethodDescriptor.
Definition: MethodC2.hpp:170
fieldinfo * get_field() const
Get the accessed field.
argument_type from
Write a value to a static field.
virtual void lowerComplex(Instruction *I, int ruleId)
Type::TypeID get_type() const
get the value type of the instruction
Definition: Value.hpp:68
Get the value of an object's field.
Base type of instruction that perform a method invocation.
Load a value from an array.
static const COND GT
Signed greater than (Z == 0 && N == V)
Definition: Aarch64Cond.hpp:56
A basic block of (scheduled) machine instructions.
virtual void create_frame(CodeMemory *CM, StackSlotManager *SSM) const
virtual INVOKESTATICInst * to_INVOKESTATICInst()
Definition: Instruction.hpp:72
Transfers execution back to an unoptimized version of the method.
Get the value of a static field.
union constant_FMIref::@26 p
StackSlotManager * get_StackSlotManager()
Definition: JITData.hpp:56
Write a value to an object&#39;s field.
static const COND CC
Carry clear (C == 0)
Definition: Aarch64Cond.hpp:47
uint8_t u1
Definition: types.hpp:40
u1 * methodptr
Definition: global.hpp:40
Perform a bounds-check for an array-access.
void set_operand(std::size_t i, MachineOperand *op)
methodinfo * method
Definition: references.hpp:101
virtual void visit(LOADInst *I, bool copyOperands)
JNIEnv jthread jobject jclass jlong size
Definition: jvmti.h:387
Represents an explicit null-check on an object reference.
static const COND LT
Signed less than (N != V)
Definition: Aarch64Cond.hpp:55
Store a value into an array.
This stores a reference to a BeginInst.
ManagedStackSlot * create_argument_slot(Type::TypeID type, u4 index)
Create a ManagedStackSlot for an invocation argument.
constant_FMIref * get_fmiref() const
Get information about the method to invoke.
GPRegister R9("R9", 9, 9 *8, 8)
SuccessorListTy::const_iterator succ_end() const
void push_back(MachineInstruction *value)
Appends the given element value to the end of the container.
Instruction::InstID tmp[]
Definition: Matcher.cpp:55
FPRegister V0("V0", 0, 0 *16, 16)
Invoke an instance method with special handling.
Provides a mapping from HIR values to baseline IR variables.
Conditional::CondID get_condition() const
Get the kind of condition that is computed.
int32_t offset
Definition: field.hpp:66
CodeMemory * get_CodeMemory()
Definition: JITData.hpp:57
CodeMemory * get_CodeMemory() const
Get containing CodeMemory.
Definition: Segment.hpp:145
#define OFFSET(s, el)
Definition: memory.hpp:90
imm_union * value
Definition: field.hpp:67
u1 * get_address() const
Get the pointer to the function that implements the builtin functionality.
#define UNIMPLEMENTED_MSG(EXPR_LONG)
Definition: logging.hpp:145
void push_front(MachineInstruction *value)
inserts value to the beginning
Instruction super class.
Definition: Instruction.hpp:75
Specialization for INVOKESpecial and INVOKEStatic.
MIIterator i
int32_t s4
Definition: types.hpp:45
IdxTy insert_tag(SegmentTag< Tag > *tag, IdxTy o)
insert tag
Definition: Segment.hpp:123
SuccessorListTy::const_iterator succ_begin() const
MethodDescriptor & get_MethodDescriptor()
Get the MethodDescriptor of the method to invoke.
virtual const char * get_name() const
codeinfo * code
Definition: method.hpp:103
IdxTy get_index(Tag2 tag) const
get the index of a tag
Definition: Segment.hpp:254
virtual MachineInstruction * create_Move(MachineOperand *src, MachineOperand *dst) const
SuccessorListTy::const_iterator succ_const_iterator
MIIterator e
static const COND HI
Unsigned higher (C == 1 && Z == 0)
Definition: Aarch64Cond.hpp:52
Get the length of an array.
GPRegister R0("R0", 0, 0 *8, 8)
Proxy to encode explicit and implicit successors.
virtual SourceStateInst * get_SourceStateInst() const
MachineBasicBlock * get_current() const
Definition: Backend.hpp:121
#define I(value)
Definition: codegen.c:279
void lower_source_state_dependencies(MachineReplacementPointInst *MI, SourceStateInst *source_state)
Definition: Backend.cpp:72
Represents a replacement point at a call site (INVOKE* ICMDs) The reference to the corresponding call...
Operands that can be directly used by the machine (register, memory, stackslot)
int8_t s1
Definition: types.hpp:39
const MachineOperandDesc & get_result() const
aarch64::NativeRegister NativeRegister
Definition: Target.hpp:42
int16_t s2
Definition: types.hpp:42
unsigned get_index() const
The index of the argument is represented by this LOADInst.
Segment reference.
Definition: Segment.hpp:44
virtual INVOKESPECIALInst * to_INVOKESPECIALInst()
Definition: Instruction.hpp:71
BeginInst * target
GPRegister R18("R18", 18, 18 *8, 8)
void insert_pred(MachineBasicBlock *value)
Appends the given element value to the list of predecessors.
ConstTag< DataSegmentType, double, DoubleID > DSDouble
Definition: DataSegment.hpp:45
MachineBasicBlock * new_block() const
Definition: Backend.cpp:140
static const COND LE
Signed less than or equal (!(Z == 0 && N == V))
Definition: Aarch64Cond.hpp:57
MachineOperand * get_op(Instruction *I) const
Definition: Backend.hpp:100
#define ABORT_MSG(EXPR_SHORT, EXPR_LONG)
Definition: logging.hpp:133
void place_deoptimization_marker(SourceStateAwareInst *I)
Definition: Backend.cpp:114
JITData * get_JITData() const
Definition: Backend.hpp:52
Ref get_Ref(std::size_t t)
get a new reference to the segment
Definition: Segment.hpp:208
virtual INVOKEVIRTUALInst * to_INVOKEVIRTUALInst()
Definition: Instruction.hpp:70
virtual INVOKEINTERFACEInst * to_INVOKEINTERFACEInst()
Definition: Instruction.hpp:73
A LOADInst represents an argument that is passed to the current method.
const DataSegment & get_DataSegment() const
get DataSegment
Definition: CodeMemory.hpp:69
u1 * entrypoint
Definition: code.hpp:83
FloatHandling get_FloatHandling() const
Return from the current method.
MethodDescriptor TODO: more info.