CACAO
X86_64Backend.cpp
Go to the documentation of this file.
1 /* src/vm/jit/compiler2/X86_64Backend.cpp - X86_64Backend
2 
3  Copyright (C) 2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
37 #include "vm/jit/PatcherNew.hpp"
38 #include "vm/jit/jit.hpp"
39 #include "vm/jit/code.hpp"
40 #include "vm/class.hpp"
41 #include "vm/field.hpp"
42 
43 #include "mm/memory.hpp"
44 
45 #include "md-trap.hpp"
46 
47 #include "toolbox/OStream.hpp"
48 #include "toolbox/logging.hpp"
49 
50 #define DEBUG_NAME "compiler2/x86_64"
51 
52 // code.hpp fix
53 #undef RAX
54 #undef XMM0
55 
56 namespace cacao {
57 namespace jit {
58 namespace compiler2 {
59 
60 // BackendBase must be specialized in namespace compiler2!
61 using namespace x86_64;
62 
63 template<>
64 const char* BackendBase<X86_64>::get_name() const {
65  return "x86_64";
66 }
67 
68 template<>
// Builds a machine-level move from operand `src` to operand `dst`, selecting
// MovInst for integral types and the SSE move variants for float/double.
// NOTE(review): the signature line (internal line 69) is missing from this
// extract, as are several interior lines (visible as gaps in the embedded
// numbering, e.g. 80, 85, 90, 104 — presumably a ReferenceTypeID case, the
// operand-size argument, and the Immediate-operand case labels). Confirm
// against the repository source.
70  MachineOperand* dst) const {
71  Type::TypeID type = dst->get_type();
72  assert(type == src->get_type());
// Memory-to-memory moves are not encodable on x86_64; at most one side may
// be a stack slot.
73  assert(!(src->is_stackslot() && dst->is_stackslot()));
74  switch (type) {
75  case Type::CharTypeID:
76  case Type::ByteTypeID:
77  case Type::ShortTypeID:
78  case Type::IntTypeID:
79  case Type::LongTypeID:
81  {
82  return new MovInst(
83  SrcOp(src),
84  DstOp(dst),
86  }
87  case Type::DoubleTypeID:
88  {
// Immediates cannot be moved directly into XMM registers; a dedicated
// immediate-move instruction is used for that case.
89  switch (src->get_OperandID()) {
91  return new MovImmSDInst(
92  SrcOp(src),
93  DstOp(dst));
94  default:
95  return new MovSDInst(
96  SrcOp(src),
97  DstOp(dst));
98  }
99  break;
100  }
101  case Type::FloatTypeID:
102  {
103  switch (src->get_OperandID()) {
105  return new MovImmSSInst(
106  SrcOp(src),
107  DstOp(dst));
108  default:
109  return new MovSSInst(
110  SrcOp(src),
111  DstOp(dst));
112  }
113  break;
114  }
115  default:
116  break;
117  }
// Unsupported type: abort with diagnostic; NULL is only reached if
// ABORT_MSG returns.
118  ABORT_MSG("x86_64: Move not supported",
119  "Inst: " << src << " -> " << dst << " type: " << type);
120  return NULL;
121 }
122 
123 template<>
// NOTE(review): the declaration line (internal line 124) is missing from this
// extract; the visible body simply emits an unconditional jump to `target`.
125  return new JumpInst(target);
126 }
127 
128 namespace {
129 
// Round `val` up to the next multiple of `size`; returns `val` unchanged when
// it is already aligned.
template <unsigned size, class T>
inline T align_to(T val) {
	const T remainder = val % size;
	if (remainder == 0)
		return val;
	return val + (size - remainder);
}
135 
136 template <class I,class Seg>
137 static void write_data(Seg seg, I data) {
138  assert(seg.size() == sizeof(I));
139 
140  for (int i = 0, e = sizeof(I) ; i < e ; ++i) {
141  seg[i] = (u1) 0xff & *(reinterpret_cast<u1*>(&data) + i);
142  }
143 
144 }
145 
146 } // end anonymous namespace
147 
148 template<>
// NOTE(review): the signature line (internal line 149) is missing from this
// extract. The visible body emits the function prologue: an EnterInst whose
// frame size is rounded up to a 16-byte boundary, followed by NOP padding.
150  EnterInst enter(align_to<16>(SSM->get_frame_size()));
151  enter.emit(CM);
152  // fix alignment
// NOTE(review): internal line 153 is also missing; what `CF` refers to and
// how much padding is required cannot be determined from this extract.
154  emit_nop(CF,CF.size());
155 }
156 
// Lowers a LOADInst (method-parameter load): copies the incoming parameter
// described by the machine method descriptor into a fresh virtual register.
// NOTE(review): internal lines 160, 171 and 175 are missing from this extract
// (presumably the method-descriptor setup, a ReferenceTypeID case label and
// the MovInst operand-size argument) — confirm against the repository source.
157 void X86_64LoweringVisitor::visit(LOADInst *I, bool copyOperands) {
158  assert(I);
159  //MachineInstruction *minst = loadParameter(I->get_index(), I->get_type());
161  //FIXME inefficient
162  const MachineMethodDescriptor MMD(MD);
163  Type::TypeID type = I->get_type();
164  VirtualRegister *dst = new VirtualRegister(type);
165  MachineInstruction *move = NULL;
166  switch (type) {
167  case Type::ByteTypeID:
168  case Type::ShortTypeID:
169  case Type::IntTypeID:
170  case Type::LongTypeID:
172  move = new MovInst(
173  SrcOp(MMD[I->get_index()]),
174  DstOp(dst),
176  break;
177  case Type::FloatTypeID:
178  move = new MovSSInst(
179  SrcOp(MMD[I->get_index()]),
180  DstOp(dst));
181  break;
182  case Type::DoubleTypeID:
183  move = new MovSDInst(
184  SrcOp(MMD[I->get_index()]),
185  DstOp(dst));
186  break;
187  default:
188  ABORT_MSG("x86_64 type not supported: ",
189  I << " type: " << type);
190  }
// Append the move to the current machine basic block and record its result
// operand as the lowering of this IR instruction.
191  get_current()->push_back(move);
192  set_op(I,move->get_result().op);
193 }
194 
// Lowers a CMPInst (Java fcmpl/fcmpg/dcmpl/dcmpg semantics) for float/double
// operands: preloads -1/0/1 into registers, compares with UCOMISS/UCOMISD,
// then selects the result with conditional moves. Unordered comparisons set
// the parity flag, which the final CMov (Cond::P) uses to pick GT or LT
// depending on the instruction's FloatHandling.
// NOTE(review): internal lines 206-210 are missing from this extract — they
// presumably declare `dst`, `less`, `greater` and `op_size` used below.
195 void X86_64LoweringVisitor::visit(CMPInst *I, bool copyOperands) {
196  assert(I);
197  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
198  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
199  Type::TypeID type = I->get_operand(0)->get_type();
200  assert(type == I->get_operand(1)->get_type());
201  switch (type) {
202  case Type::FloatTypeID:
203  case Type::DoubleTypeID:
204  {
205 
211  // unordered 0
212  MBB->push_back(new MovInst(
213  SrcOp(new Immediate(0,Type::IntType())),
214  DstOp(dst),
215  op_size
216  ));
217  // less then (1)
218  MBB->push_back(new MovInst(
219  SrcOp(new Immediate(1,Type::IntType())),
220  DstOp(less),
221  op_size
222  ));
223  // greater then (-1)
224  MBB->push_back(new MovInst(
225  SrcOp(new Immediate(-1,Type::IntType())),
226  DstOp(greater),
227  op_size
228  ));
229  // compare
230  switch (type) {
231  case Type::FloatTypeID:
232  MBB->push_back(new UCOMISSInst(Src2Op(src_op1), Src1Op(src_op2)));
233  break;
234  case Type::DoubleTypeID:
235  MBB->push_back(new UCOMISDInst(Src2Op(src_op1), Src1Op(src_op2)));
236  break;
237  default: assert(0);
238  break;
239  }
// Cond::B / Cond::A are the unsigned below/above conditions, which is how
// UCOMIS* reports less-than / greater-than via CF/ZF.
240  // cmov less
241  MBB->push_back(new CMovInst(
242  Cond::B,
243  DstSrc1Op(dst),
244  Src2Op(less),
245  op_size
246  ));
247  // cmov greater
248  MBB->push_back(new CMovInst(
249  Cond::A,
250  DstSrc1Op(dst),
251  Src2Op(greater),
252  op_size
253  ));
254  switch (I->get_FloatHandling()) {
255  case CMPInst::L:
256  // treat unordered as GT
257  MBB->push_back(new CMovInst(
258  Cond::P,
259  DstSrc1Op(dst),
260  Src2Op(greater),
261  op_size
262  ));
263  break;
264 
265  case CMPInst::G:
266  // treat unordered as LT
267  MBB->push_back(new CMovInst(
268  Cond::P,
269  DstSrc1Op(dst),
270  Src2Op(less),
271  op_size
272  ));
273  break;
274  default: assert(0);
275  break;
276  }
277  set_op(I,dst);
278  return;
279  }
280  default: break;
281  }
282  ABORT_MSG("x86_64: Lowering not supported",
283  "Inst: " << I << " type: " << type);
284 }
285 
// Lowers an IFInst for integral operands: emits a CmpInst followed by a
// conditional jump whose condition code mirrors the IR conditional; both the
// then- and else-targets are encoded in the CondJumpInst.
// NOTE(review): internal line 300 is missing from this extract (presumably
// the operand-size argument closing the CmpInst constructor call).
286 void X86_64LoweringVisitor::visit(IFInst *I, bool copyOperands) {
287  assert(I);
288  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
289  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
290  Type::TypeID type = I->get_type();
291  switch (type) {
292  case Type::ByteTypeID:
293  case Type::IntTypeID:
294  case Type::LongTypeID:
295  {
296  // Integer types
// Note the operand order: Src2Op/Src1Op encode "cmp src_op2, src_op1" so
// that the signed condition codes below read naturally as op1 <cond> op2.
297  CmpInst *cmp = new CmpInst(
298  Src2Op(src_op2),
299  Src1Op(src_op1),
301 
302  MachineInstruction *cjmp = NULL;
303  BeginInstRef &then = I->get_then_target();
304  BeginInstRef &els = I->get_else_target();
305 
306  switch (I->get_condition()) {
307  case Conditional::EQ:
308  cjmp = new CondJumpInst(Cond::E, get(then.get()),get(els.get()));
309  break;
310  case Conditional::LT:
311  cjmp = new CondJumpInst(Cond::L, get(then.get()),get(els.get()));
312  break;
313  case Conditional::LE:
314  cjmp = new CondJumpInst(Cond::LE, get(then.get()),get(els.get()));
315  break;
316  case Conditional::GE:
317  cjmp = new CondJumpInst(Cond::GE, get(then.get()),get(els.get()));
318  break;
319  case Conditional::GT:
320  cjmp = new CondJumpInst(Cond::G, get(then.get()),get(els.get()));
321  break;
322  case Conditional::NE:
323  cjmp = new CondJumpInst(Cond::NE, get(then.get()),get(els.get()));
324  break;
325  default:
326  ABORT_MSG("x86_64 Conditional not supported: ",
327  I << " cond: " << I->get_condition());
328  }
329  //MachineInstruction *jmp = new JumpInst(get(els.get()));
330  get_current()->push_back(cmp);
331  get_current()->push_back(cjmp);
332  //get_current()->push_back(jmp);
333 
334  set_op(I,cjmp->get_result().op);
335  return;
336  }
337  default: break;
338  }
339  ABORT_MSG("x86_64: Lowering not supported",
340  "Inst: " << I << " type: " << type);
341 }
342 
// Lowers a NEGInst. Float/double negation is done by XOR-ing the value with a
// sign-bit mask (0x80000000 / 0x8000000000000000) loaded into the destination
// register; there is no SSE negate instruction.
// NOTE(review): internal line 345 is missing from this extract (presumably
// the declaration of `src`), as is line 376 in the int/long path — the latter
// presumably emits the actual integer NegInst after the move. Confirm against
// the repository source.
343 void X86_64LoweringVisitor::visit(NEGInst *I, bool copyOperands) {
344  assert(I);
346  Type::TypeID type = I->get_type();
348  //GPInstruction::OperandSize op_size = get_OperandSize_from_Type(type);
349 
350  VirtualRegister *dst = new VirtualRegister(type);
351 
352  switch (type) {
353  case Type::FloatTypeID:
354  MBB->push_back(new MovImmSSInst(
355  SrcOp(new Immediate(0x80000000,Type::IntType())),
356  DstOp(dst)
357  ));
358  MBB->push_back(new XORPSInst(
359  Src2Op(src),
360  DstSrc1Op(dst)
361  ));
362  break;
363  case Type::DoubleTypeID:
364  MBB->push_back(new MovImmSDInst(
365  SrcOp(new Immediate(0x8000000000000000L,Type::LongType())),
366  DstOp(dst)
367  ));
368  MBB->push_back(new XORPDInst(
369  Src2Op(src),
370  DstSrc1Op(dst)
371  ));
372  break;
373  case Type::IntTypeID:
374  case Type::LongTypeID:
375  MBB->push_back(get_Backend()->create_Move(src,dst));
377  break;
378  default:
379  ABORT_MSG("x86_64: Lowering not supported",
380  "Inst: " << I << " type: " << type);
381  }
382  set_op(I,dst);
383 }
384 
// Lowers an ADDInst to the matching two-operand x86_64 add: AddInst for
// integral types, AddSD/AddSS for double/float. setupSrcDst prepares the
// destination register (honouring copyOperands and commutativity).
// NOTE(review): internal line 403 is missing from this extract (presumably
// the operand-size argument closing the AddInst constructor call).
385 void X86_64LoweringVisitor::visit(ADDInst *I, bool copyOperands) {
386  assert(I);
387  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
388  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
389  Type::TypeID type = I->get_type();
390  VirtualRegister *dst = NULL;
391 
392  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
393 
394  MachineInstruction *alu = NULL;
395 
396  switch (type) {
397  case Type::ByteTypeID:
398  case Type::IntTypeID:
399  case Type::LongTypeID:
400  alu = new AddInst(
401  Src2Op(src_op2),
402  DstSrc1Op(dst),
404  break;
405  case Type::DoubleTypeID:
406  alu = new AddSDInst(
407  Src2Op(src_op2),
408  DstSrc1Op(dst));
409  break;
410  case Type::FloatTypeID:
411  alu = new AddSSInst(
412  Src2Op(src_op2),
413  DstSrc1Op(dst));
414  break;
415  default:
416  ABORT_MSG("x86_64: Lowering not supported",
417  "Inst: " << I << " type: " << type);
418  }
419  get_current()->push_back(alu);
420  set_op(I,alu->get_result().op);
421 }
422 
// Lowers an ANDInst to a two-operand AndInst (integral types only).
// NOTE(review): internal line 441 is missing from this extract (presumably
// the operand-size argument closing the AndInst constructor call).
423 void X86_64LoweringVisitor::visit(ANDInst *I, bool copyOperands) {
424  assert(I);
425  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
426  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
427  Type::TypeID type = I->get_type();
428  VirtualRegister *dst = NULL;
429 
430  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
431 
432  MachineInstruction *alu = NULL;
433 
434  switch (type) {
435  case Type::ByteTypeID:
436  case Type::IntTypeID:
437  case Type::LongTypeID:
438  alu = new AndInst(
439  Src2Op(src_op2),
440  DstSrc1Op(dst),
442  break;
443  default:
444  ABORT_MSG("x86_64: Lowering not supported",
445  "Inst: " << I << " type: " << type);
446  }
447  get_current()->push_back(alu);
448  set_op(I,alu->get_result().op);
449 }
450 
// Lowers an ORInst to a two-operand OrInst (integral types only).
// NOTE(review): internal line 469 is missing from this extract (presumably
// the operand-size argument closing the OrInst constructor call).
451 void X86_64LoweringVisitor::visit(ORInst *I, bool copyOperands) {
452  assert(I);
453  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
454  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
455  Type::TypeID type = I->get_type();
456  VirtualRegister *dst = NULL;
457 
458  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
459 
460  MachineInstruction *alu = NULL;
461 
462  switch (type) {
463  case Type::ByteTypeID:
464  case Type::IntTypeID:
465  case Type::LongTypeID:
466  alu = new OrInst(
467  Src2Op(src_op2),
468  DstSrc1Op(dst),
470  break;
471  default:
472  ABORT_MSG("x86_64: Lowering not supported",
473  "Inst: " << I << " type: " << type);
474  }
475  get_current()->push_back(alu);
476  set_op(I,alu->get_result().op);
477 }
478 
// Lowers a XORInst to a two-operand XorInst (integral types only).
// NOTE(review): internal line 497 is missing from this extract (presumably
// the operand-size argument closing the XorInst constructor call).
479 void X86_64LoweringVisitor::visit(XORInst *I, bool copyOperands) {
480  assert(I);
481  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
482  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
483  Type::TypeID type = I->get_type();
484  VirtualRegister *dst = NULL;
485 
486  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
487 
488  MachineInstruction *alu = NULL;
489 
490  switch (type) {
491  case Type::ByteTypeID:
492  case Type::IntTypeID:
493  case Type::LongTypeID:
494  alu = new XorInst(
495  Src2Op(src_op2),
496  DstSrc1Op(dst),
498  break;
499  default:
500  ABORT_MSG("x86_64: Lowering not supported",
501  "Inst: " << I << " type: " << type);
502  }
503  get_current()->push_back(alu);
504  set_op(I,alu->get_result().op);
505 }
506 
// Lowers a SUBInst: SubInst for integral types, SubSD/SubSS for double/float.
// Subtraction is not commutable, which setupSrcDst must respect.
// NOTE(review): internal line 525 is missing from this extract (presumably
// the operand-size argument closing the SubInst constructor call).
507 void X86_64LoweringVisitor::visit(SUBInst *I, bool copyOperands) {
508  assert(I);
509  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
510  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
511  Type::TypeID type = I->get_type();
512  VirtualRegister *dst = NULL;
513 
514  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
515 
516  MachineInstruction *alu = NULL;
517 
518  switch (type) {
519  case Type::ByteTypeID:
520  case Type::IntTypeID:
521  case Type::LongTypeID:
522  alu = new SubInst(
523  Src2Op(src_op2),
524  DstSrc1Op(dst),
526  break;
527  case Type::DoubleTypeID:
528  alu = new SubSDInst(
529  Src2Op(src_op2),
530  DstSrc1Op(dst));
531  break;
532  case Type::FloatTypeID:
533  alu = new SubSSInst(
534  Src2Op(src_op2),
535  DstSrc1Op(dst));
536  break;
537  default:
538  ABORT_MSG("x86_64: Lowering not supported",
539  "Inst: " << I << " type: " << type);
540  }
541  get_current()->push_back(alu);
542  set_op(I,alu->get_result().op);
543 }
544 
// Lowers a MULInst: IMulInst for integral types, MulSD/MulSS for
// double/float.
// NOTE(review): internal line 563 is missing from this extract (presumably
// the operand-size argument closing the IMulInst constructor call).
545 void X86_64LoweringVisitor::visit(MULInst *I, bool copyOperands) {
546  assert(I);
547  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
548  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
549  Type::TypeID type = I->get_type();
550  VirtualRegister *dst = NULL;
551 
552  setupSrcDst(src_op1, src_op2, dst, type, copyOperands, I->is_commutable());
553 
554  MachineInstruction *alu = NULL;
555 
556  switch (type) {
557  case Type::ByteTypeID:
558  case Type::IntTypeID:
559  case Type::LongTypeID:
560  alu = new IMulInst(
561  Src2Op(src_op2),
562  DstSrc1Op(dst),
564  break;
565  case Type::DoubleTypeID:
566  alu = new MulSDInst(
567  Src2Op(src_op2),
568  DstSrc1Op(dst));
569  break;
570  case Type::FloatTypeID:
571  alu = new MulSSInst(
572  Src2Op(src_op2),
573  DstSrc1Op(dst));
574  break;
575  default:
576  ABORT_MSG("x86_64: Lowering not supported",
577  "Inst: " << I << " type: " << type);
578  }
579  get_current()->push_back(alu);
580  set_op(I,alu->get_result().op);
581 }
582 
// Lowers a DIVInst. Integer division uses the x86_64 IDIV convention: the
// dividend lives in RDX:RAX (sign-extended via CDQ/CQO), the quotient is
// produced in RAX and the remainder clobbers RDX. Float/double division uses
// the two-operand SSE divide after copying the dividend into a fresh
// virtual register.
// NOTE(review): internal line 588 is missing from this extract (presumably
// the declaration of `opsize` used in the integer path).
583 void X86_64LoweringVisitor::visit(DIVInst *I, bool copyOperands) {
584  assert(I);
585  MachineOperand* src_op1 = get_op(I->get_operand(0)->to_Instruction());
586  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
587  Type::TypeID type = I->get_type();
589 
590  MachineInstruction *alu = NULL;
591 
592  switch (type) {
593  case Type::IntTypeID:
594  case Type::LongTypeID:
595  {
596  // 1. move the dividend to RAX
597  // 2. extend the dividend to RDX:RAX
598  // 3. perform the division
599  MachineOperand *dividendUpper = new NativeRegister(type, &RDX);
600  MachineOperand *result = new NativeRegister(type, &RAX);
601  MachineInstruction *convertToQuadword = new CDQInst(DstSrc1Op(dividendUpper), DstSrc2Op(result), opsize);
602  get_current()->push_back(get_Backend()->create_Move(src_op1, result));
603  get_current()->push_back(convertToQuadword);
604  alu = new IDivInst(Src2Op(src_op2), DstSrc1Op(result), DstSrc2Op(dividendUpper), opsize);
605  break;
606  }
607  case Type::DoubleTypeID:
608  {
609  VirtualRegister *dst = new VirtualRegister(type);
610  MachineInstruction *mov = get_Backend()->create_Move(src_op1, dst);
611  get_current()->push_back(mov);
612  alu = new DivSDInst(
613  Src2Op(src_op2),
614  DstSrc1Op(dst));
615  break;
616  }
617  case Type::FloatTypeID:
618  {
619  VirtualRegister *dst = new VirtualRegister(type);
620  MachineInstruction *mov = get_Backend()->create_Move(src_op1, dst);
621  get_current()->push_back(mov);
622  alu = new DivSSInst(
623  Src2Op(src_op2),
624  DstSrc1Op(dst));
625  break;
626  }
627  default:
628  ABORT_MSG("x86_64: Lowering not supported",
629  "Inst: " << I << " type: " << type);
630  }
631 
632  get_current()->push_back(alu);
633  set_op(I,alu->get_result().op);
634 }
635 
// Lowers a REMInst. Integer remainder reuses the IDIV convention (remainder
// in RDX). Float/double remainder is computed on the x87 FPU stack via
// FPREM, because SSE has no remainder instruction: both operands are spilled
// to stack slots, loaded with FLD, and the result is stored back with FSTP.
// NOTE(review): several internal lines are missing from this extract —
// 642 (presumably the `opsize` declaration), 671 (presumably the
// initialization of `ssm`, which is otherwise used uninitialized below), and
// 689-690 (presumably the FP-stack cleanup referred to by the comment).
// Confirm against the repository source.
636 void X86_64LoweringVisitor::visit(REMInst *I, bool copyOperands) {
637  assert(I);
638 
639  MachineOperand* dividendLower;
640  MachineOperand* src_op2 = get_op(I->get_operand(1)->to_Instruction());
641  Type::TypeID type = I->get_type();
643  MachineOperand *dividend = get_op(I->get_operand(0)->to_Instruction());;
644 
645  MachineInstruction *resultInst = NULL;
646  MachineOperand *resultOperand;
647  MachineInstruction *convertToQuadword;
648 
649  StackSlotManager *ssm;
650  ManagedStackSlot *src;
651  ManagedStackSlot *dst;
652  ManagedStackSlot *resultSlot;
653 
654  switch (type) {
655  case Type::IntTypeID:
656  case Type::LongTypeID:
657 
658  // 1. move the dividend to RAX
659  // 2. extend the dividend to RDX:RAX
660  // 3. perform the division
661  resultOperand = new NativeRegister(type, &RDX);
662  dividendLower = new NativeRegister(type, &RAX);
663  convertToQuadword = new CDQInst(DstSrc1Op(dividendLower), DstSrc2Op(resultOperand), opsize);
664  get_current()->push_back(get_Backend()->create_Move(dividend, dividendLower));
665  get_current()->push_back(convertToQuadword);
666  resultInst = new IDivInst(Src2Op(src_op2), DstSrc1Op(resultOperand), DstSrc2Op(dividendLower), opsize);
667  get_current()->push_back(resultInst);
668  break;
669  case Type::FloatTypeID:
670  case Type::DoubleTypeID:
672  src = ssm->create_slot(type);
673  dst = ssm->create_slot(type);
674  resultSlot = ssm->create_slot(type);
675 
676  // operands of the FP stack can only be loaded from memory
677  get_current()->push_back(get_Backend()->create_Move(dividend, src));
678  get_current()->push_back(get_Backend()->create_Move(src_op2, dst));
679 
680  // initialize the FP stack
681  get_current()->push_back(new FLDInst(SrcMemOp(dst), opsize));
682  get_current()->push_back(new FLDInst(SrcMemOp(src), opsize));
683 
684  get_current()->push_back(new FPRemInst(opsize));
685  resultInst = new FSTPInst(DstMemOp(resultSlot), opsize);
686  get_current()->push_back(resultInst);
687 
688  // clean the FP stack
691  break;
692  default:
693  ABORT_MSG("x86_64: Lowering not supported",
694  "Inst: " << I << " type: " << type);
695  }
696 
697  set_op(I,resultInst->get_result().op);
698 }
699 
// Lowers an AREFInst (array element address computation). The entire body is
// compiled out (#if 0): without pattern matching, ALOADInst/ASTOREInst
// compute the effective address themselves, so this visitor is a no-op.
// NOTE(review): internal lines 712, 734 and 745 are missing from this extract
// (presumably a declaration, the ReferenceTypeID case label, and the LEA
// instruction creation referenced by `lea` below).
700 void X86_64LoweringVisitor::visit(AREFInst *I, bool copyOperands) {
701  // Only emit Instructions if Pattern Matching is used. If not ASTOREInst/ALOADInst will handle everything
702 #if 0
703 //#ifdef PATTERN_MATCHING // won't currently work because if base or index are modified, AREF should be modified instead/aswell.
704  assert(I);
705  MachineOperand* src_ref = get_op(I->get_operand(0)->to_Instruction());
706  MachineOperand* src_index = get_op(I->get_operand(1)->to_Instruction());
707  assert(src_ref->get_type() == Type::ReferenceTypeID);
708  assert(src_index->get_type() == Type::IntTypeID);
709 
710  Type::TypeID type = I->get_type();
711 
713 
// Offset of the first element within the per-type Java array structure.
714  s4 offset;
715  switch (type) {
716  case Type::ByteTypeID:
717  offset = OFFSET(java_bytearray_t, data[0]);
718  break;
719  case Type::ShortTypeID:
720  offset = OFFSET(java_shortarray_t, data[0]);
721  break;
722  case Type::IntTypeID:
723  offset = OFFSET(java_intarray_t, data[0]);
724  break;
725  case Type::LongTypeID:
726  offset = OFFSET(java_longarray_t, data[0]);
727  break;
728  case Type::FloatTypeID:
729  offset = OFFSET(java_floatarray_t, data[0]);
730  break;
731  case Type::DoubleTypeID:
732  offset = OFFSET(java_doublearray_t, data[0]);
733  break;
735  offset = OFFSET(java_objectarray_t, data[0]);
736  break;
737  default:
738  ABORT_MSG("x86_64 Lowering not supported",
739  "Inst: " << I << " type: " << type);
740  offset = 0;
741  }
742 
743  // create modrm source operand
744  MachineOperand *modrm = new X86_64ModRMOperand(BaseOp(src_ref),IndexOp(src_index),type,offset);
746  get_current()->push_back(lea);
747  set_op(I,lea->get_result().op);
748 #endif
749 }
750 
// Lowers an ALOADInst (array element load): builds a base+index*scale+offset
// ModRM operand from the AREFInst's base reference and index, then moves the
// element into a fresh virtual register. The offset is the position of the
// first element inside the per-type Java array structure.
// NOTE(review): internal line 791 is missing from this extract (presumably
// the ReferenceTypeID case label preceding the objectarray offset).
751 void X86_64LoweringVisitor::visit(ALOADInst *I, bool copyOperands) {
752  assert(I);
753  Type::TypeID type = I->get_type();
754  Instruction* ref_inst = I->get_operand(0)->to_Instruction();
755  MachineOperand *vreg = new VirtualRegister(type);
756  MachineOperand *modrm = NULL;
757 
758  // if Pattern Matching is used, src op is Register with Effective Address , otherwise src op is AREFInst
759 #if 0
760 //#ifdef PATTERN_MATCHING // won't currently work because if base or index are modified, AREF should be modified instead/aswell.
761  MachineOperand* src_ref = get_op(ref_inst);
762  assert(src_ref->get_type() == Type::ReferenceTypeID);
763 
764  modrm = new X86_64ModRMOperand(BaseOp(src_ref));
765 #else
// Without pattern matching the AREFInst emitted nothing, so dig its base
// and index operands out of the IR here.
766  MachineOperand* src_base = get_op(ref_inst->get_operand(0)->to_Instruction());
767  assert(src_base->get_type() == Type::ReferenceTypeID);
768  MachineOperand* src_index = get_op(ref_inst->get_operand(1)->to_Instruction());
769  assert(src_index->get_type() == Type::IntTypeID);
770 
771  s4 offset;
772  switch (type) {
773  case Type::ByteTypeID:
774  offset = OFFSET(java_bytearray_t, data[0]);
775  break;
776  case Type::ShortTypeID:
777  offset = OFFSET(java_shortarray_t, data[0]);
778  break;
779  case Type::IntTypeID:
780  offset = OFFSET(java_intarray_t, data[0]);
781  break;
782  case Type::LongTypeID:
783  offset = OFFSET(java_longarray_t, data[0]);
784  break;
785  case Type::FloatTypeID:
786  offset = OFFSET(java_floatarray_t, data[0]);
787  break;
788  case Type::DoubleTypeID:
789  offset = OFFSET(java_doublearray_t, data[0]);
790  break;
792  offset = OFFSET(java_objectarray_t, data[0]);
793  break;
794  default:
795  ABORT_MSG("x86_64 Lowering not supported",
796  "Inst: " << I << " type: " << type);
797  offset = 0;
798  }
799 
800  modrm = new X86_64ModRMOperand(type,BaseOp(src_base),IndexOp(src_index),type,offset);
801 #endif
802  MachineInstruction *move = get_Backend()->create_Move(modrm, vreg);
803  get_current()->push_back(move);
804  set_op(I,move->get_result().op);
805 }
806 
// Lowers an ASTOREInst (array element store): the mirror image of ALOADInst —
// builds the same base+index*scale+offset ModRM operand and moves the value
// operand into it. The element type is taken from the stored value.
// NOTE(review): internal line 847 is missing from this extract (presumably
// the ReferenceTypeID case label preceding the objectarray offset).
807 void X86_64LoweringVisitor::visit(ASTOREInst *I, bool copyOperands) {
808  assert(I);
809  Instruction* ref_inst = I->get_operand(0)->to_Instruction();
810  MachineOperand* src_value = get_op(I->get_operand(1)->to_Instruction());
811  Type::TypeID type = src_value->get_type();
812  MachineOperand *modrm = NULL;
813 
814  // if Pattern Matching is used, src op is Register with Effective Address , otherwise src op is AREFInst
815 #if 0
816 //#ifdef PATTERN_MATCHING // won't currently work because if base or index are modified, AREF should be modified instead/aswell.
817  MachineOperand* src_ref = get_op(ref_inst);
818  assert(src_ref->get_type() == Type::ReferenceTypeID);
819 
820  modrm = new X86_64ModRMOperand(BaseOp(src_ref));
821 #else
822  MachineOperand* src_base = get_op(ref_inst->get_operand(0)->to_Instruction());
823  assert(src_base->get_type() == Type::ReferenceTypeID);
824  MachineOperand* src_index = get_op(ref_inst->get_operand(1)->to_Instruction());
825  assert(src_index->get_type() == Type::IntTypeID);
826 
827  s4 offset;
828  switch (type) {
829  case Type::ByteTypeID:
830  offset = OFFSET(java_bytearray_t, data[0]);
831  break;
832  case Type::ShortTypeID:
833  offset = OFFSET(java_shortarray_t, data[0]);
834  break;
835  case Type::IntTypeID:
836  offset = OFFSET(java_intarray_t, data[0]);
837  break;
838  case Type::LongTypeID:
839  offset = OFFSET(java_longarray_t, data[0]);
840  break;
841  case Type::FloatTypeID:
842  offset = OFFSET(java_floatarray_t, data[0]);
843  break;
844  case Type::DoubleTypeID:
845  offset = OFFSET(java_doublearray_t, data[0]);
846  break;
848  offset = OFFSET(java_objectarray_t, data[0]);
849  break;
850  default:
851  ABORT_MSG("x86_64 Lowering not supported",
852  "Inst: " << I << " type: " << type);
853  offset = 0;
854  }
855 
856  modrm = new X86_64ModRMOperand(type,BaseOp(src_base),IndexOp(src_index),type,offset);
857 #endif
858  MachineInstruction *move = get_Backend()->create_Move(src_value, modrm);
859  get_current()->push_back(move);
860  set_op(I,move->get_result().op);
861 }
862 
// Lowers an ARRAYLENGTHInst: loads the int length field of the array object
// into a register via a memory-operand move.
// NOTE(review): internal lines 867, 872, 874 and 877 are missing from this
// extract — presumably the continuation of the null-check comment, the
// declarations of `vreg` and the length-field `modrm` operand, and the
// operand-size argument of the MovInst. Confirm against the repository
// source.
863 void X86_64LoweringVisitor::visit(ARRAYLENGTHInst *I, bool copyOperands) {
864  assert(I);
865 
866  // Implicit null-checks are handled via deoptimization.
868 
869  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
870  assert(I->get_type() == Type::IntTypeID);
871  assert(src_op->get_type() == Type::ReferenceTypeID);
873  // create modrm source operand
875  MachineInstruction *move = new MovInst(SrcOp(modrm)
876  ,DstOp(vreg)
878  );
879  get_current()->push_back(move);
880  set_op(I,move->get_result().op);
881 }
882 
// NOTE(review): the visitor signature (internal line 883) is missing from
// this extract; judging by the visible body it lowers an array bounds check:
// load the array length, compare it against the index, and emit a trap
// instruction (internal line 910, also missing) to throw the exception.
// Lines 891, 894-895, 898 and 906 are missing as well — presumably the
// declarations of `len`/`modrm` and the operand-size arguments.
884  assert(I);
885  MachineOperand* src_ref = get_op(I->get_operand(0)->to_Instruction());
886  MachineOperand* src_index = get_op(I->get_operand(1)->to_Instruction());
887  assert(src_ref->get_type() == Type::ReferenceTypeID);
888  assert(src_index->get_type() == Type::IntTypeID);
889 
890  // Implicit null-checks are handled via deoptimization.
892 
893  // load array length
896  MachineInstruction *move = new MovInst(SrcOp(modrm)
897  ,DstOp(len)
899  );
900  get_current()->push_back(move);
901 
902  // compare with index
903  CmpInst *cmp = new CmpInst(
904  Src2Op(len),
905  Src1Op(src_index),
907  get_current()->push_back(cmp);
908 
909  // throw exception
911  get_current()->push_back(trap);
912 }
913 
// Lowers a RETURNInst: moves the return value into the ABI return register
// (RAX for integral/reference values, XMM0 for float/double), then emits
// LEAVE (tear down the frame) and RET. Void returns skip the value move.
// NOTE(review): internal lines 924 and 930 are missing from this extract
// (presumably the ReferenceTypeID case label and the MovInst operand-size
// argument).
914 void X86_64LoweringVisitor::visit(RETURNInst *I, bool copyOperands) {
915  assert(I);
916  Type::TypeID type = I->get_type();
917  MachineOperand* src_op = (type == Type::VoidTypeID ? 0 : get_op(I->get_operand(0)->to_Instruction()));
918  switch (type) {
919  case Type::CharTypeID:
920  case Type::ByteTypeID:
921  case Type::ShortTypeID:
922  case Type::IntTypeID:
923  case Type::LongTypeID:
925  {
926  MachineOperand *ret_reg = new NativeRegister(type,&RAX);
927  MachineInstruction *reg = new MovInst(
928  SrcOp(src_op),
929  DstOp(ret_reg),
931  LeaveInst *leave = new LeaveInst();
932  RetInst *ret = new RetInst(get_OperandSize_from_Type(type),SrcOp(ret_reg));
933  get_current()->push_back(reg);
934  get_current()->push_back(leave);
935  get_current()->push_back(ret);
936  set_op(I,ret->get_result().op);
937  return;
938  }
939  case Type::FloatTypeID:
940  {
941  MachineOperand *ret_reg = new NativeRegister(type,&XMM0);
942  MachineInstruction *reg = new MovSSInst(
943  SrcOp(src_op),
944  DstOp(ret_reg));
945  LeaveInst *leave = new LeaveInst();
946  RetInst *ret = new RetInst(get_OperandSize_from_Type(type),SrcOp(ret_reg));
947  get_current()->push_back(reg);
948  get_current()->push_back(leave);
949  get_current()->push_back(ret);
950  set_op(I,ret->get_result().op);
951  return;
952  }
953  case Type::DoubleTypeID:
954  {
955  MachineOperand *ret_reg = new NativeRegister(type,&XMM0);
956  MachineInstruction *reg = new MovSDInst(
957  SrcOp(src_op),
958  DstOp(ret_reg));
959  LeaveInst *leave = new LeaveInst();
960  RetInst *ret = new RetInst(get_OperandSize_from_Type(type),SrcOp(ret_reg));
961  get_current()->push_back(reg);
962  get_current()->push_back(leave);
963  get_current()->push_back(ret);
964  set_op(I,ret->get_result().op);
965  return;
966  }
967  case Type::VoidTypeID:
968  {
969  LeaveInst *leave = new LeaveInst();
970  RetInst *ret = new RetInst();
971  get_current()->push_back(leave);
972  get_current()->push_back(ret);
973  set_op(I,ret->get_result().op);
974  return;
975  }
976  default: break;
977  }
978  ABORT_MSG("x86_64 Lowering not supported",
979  "Inst: " << I << " type: " << type);
980 }
981 
// Lowers a CASTInst (primitive conversion), dispatching on (from, to) type:
// sign-extending moves for integer widening/narrowing-to-smaller, a plain
// 32-bit move to truncate long->int, and the SSE conversion instructions
// (CVTSI2SS/SD, CVTSD2SS, CVTSS2SD) for int/long <-> float/double.
// NOTE(review): several internal lines are missing from this extract —
// 985 (presumably the declaration of `from`), 995/1004/1014/1026/1037/1053/
// 1063/1073/1104/1133 (presumably operand-size/from-to size arguments), and
// 1021 (presumably the `result` VirtualRegister declaration in int->double).
982 void X86_64LoweringVisitor::visit(CASTInst *I, bool copyOperands) {
983  assert(I);
984  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
986  Type::TypeID to = I->get_type();
987 
988  switch (from) {
989  case Type::IntTypeID:
990  {
991  switch (to) {
992  case Type::ByteTypeID:
993  {
994  MachineInstruction *mov = new MovSXInst(SrcOp(src_op), DstOp(new VirtualRegister(to)),
996  get_current()->push_back(mov);
997  set_op(I, mov->get_result().op);
998  return;
999  }
1000  case Type::CharTypeID:
1001  case Type::ShortTypeID:
1002  {
1003  MachineInstruction *mov = new MovSXInst(SrcOp(src_op), DstOp(new VirtualRegister(to)),
1005  get_current()->push_back(mov);
1006  set_op(I, mov->get_result().op);
1007  return;
1008  }
1009  case Type::LongTypeID:
1010  {
1011  MachineInstruction *mov = new MovSXInst(
1012  SrcOp(src_op),
1013  DstOp(new VirtualRegister(to)),
1015  get_current()->push_back(mov);
1016  set_op(I,mov->get_result().op);
1017  return;
1018  }
1019  case Type::DoubleTypeID:
1020  {
// The destination XMM register is zeroed first; CVTSI2SD only writes the
// low lanes of its destination.
1022  MachineInstruction *clearResult = new MovImmSDInst(SrcOp(new Immediate(0, Type::DoubleType())), DstOp(result));
1023  MachineInstruction *conversion = new CVTSI2SDInst(
1024  SrcOp(src_op),
1025  DstOp(result),
1027  get_current()->push_back(clearResult);
1028  get_current()->push_back(conversion);
1029  set_op(I,conversion->get_result().op);
1030  return;
1031  }
1032  case Type::FloatTypeID:
1033  {
1034  MachineInstruction *mov = new CVTSI2SSInst(
1035  SrcOp(src_op),
1036  DstOp(new VirtualRegister(to)),
1038  get_current()->push_back(mov);
1039  set_op(I,mov->get_result().op);
1040  return;
1041  }
1042  default:
1043  break;
1044  }
1045  break;
1046  }
1047  case Type::LongTypeID:
1048  {
1049  switch (to) {
1050  case Type::IntTypeID:
1051  {
1052  // force a 32bit move to cut the upper byte
1054  get_current()->push_back(mov);
1055  set_op(I, mov->get_result().op);
1056  return;
1057  }
1058  case Type::DoubleTypeID:
1059  {
1060  MachineInstruction *mov = new CVTSI2SDInst(
1061  SrcOp(src_op),
1062  DstOp(new VirtualRegister(to)),
1064  get_current()->push_back(mov);
1065  set_op(I,mov->get_result().op);
1066  return;
1067  }
1068  case Type::FloatTypeID:
1069  {
1070  MachineInstruction *mov = new CVTSI2SSInst(
1071  SrcOp(src_op),
1072  DstOp(new VirtualRegister(to)),
1074  get_current()->push_back(mov);
1075  set_op(I,mov->get_result().op);
1076  return;
1077  }
1078  default:
1079  break;
1080  }
1081 
1082  break;
1083  }
1084 
1085  case Type::DoubleTypeID:
1086  {
1087  switch (to) {
1088 
// NOTE(review): the Int and Long cases below contain no break/return, so
// they fall through into the FloatTypeID conversion — harmless only if the
// stackanalysis pass really does replace these casts with ICMD_BUILTIN as
// the TODOs claim; worth an explicit ABORT_MSG or [[fallthrough]] marker.
1089  case Type::IntTypeID:
1090  {
1091  // TODO: currently this is replaced by the stackanalysis pass with ICMD_BUILTIN and therefore implemented
1092  // in a builtin function
1093  }
1094  case Type::LongTypeID:
1095  {
1096  // TODO: currently this is replaced by the stackanalysis pass with ICMD_BUILTIN and therefore implemented
1097  // in a builtin function
1098  }
1099  case Type::FloatTypeID:
1100  {
1101  MachineInstruction *mov = new CVTSD2SSInst(
1102  SrcOp(src_op),
1103  DstOp(new VirtualRegister(to)),
1105  get_current()->push_back(mov);
1106  set_op(I,mov->get_result().op);
1107  return;
1108  }
1109  default:
1110  break;
1111  }
1112  break;
1113  }
1114  case Type::FloatTypeID:
1115  {
1116  switch(to) {
1117 
// NOTE(review): same fall-through pattern as the Double case above.
1118  case Type::IntTypeID:
1119  {
1120  // TODO: currently this is replaced by the stackanalysis pass with ICMD_BUILTIN and therefore implemented
1121  // in a builtin function
1122  }
1123  case Type::LongTypeID:
1124  {
1125  // TODO: currently this is replaced by the stackanalysis pass with ICMD_BUILTIN and therefore implemented
1126  // in a builtin function
1127  }
1128  case Type::DoubleTypeID:
1129  {
1130  MachineInstruction *mov = new CVTSS2SDInst(
1131  SrcOp(src_op),
1132  DstOp(new VirtualRegister(to)),
1134  get_current()->push_back(mov);
1135  set_op(I,mov->get_result().op);
1136  return;
1137  }
1138  default:
1139  break;
1140  }
1141  break;
1142  }
1143  default:
1144  break;
1145  }
1146 
1147  ABORT_MSG("x86_64 Cast not supported!", "From " << from << " to " << to );
1148 }
1149 
// Lowers a HIR method invocation to machine code: selects the ABI result
// register from the return type (RAX for int/long/reference, XMM0 for
// float/double per the System V x86_64 convention used here), moves each
// argument into the location dictated by the MachineMethodDescriptor
// (register or stack argument slot), materializes the callee address
// depending on the invoke flavor, emits the call, and finally copies the
// result into a fresh virtual register.
// NOTE(review): this text is a doc-extraction of the original source; the
// embedded line numbering skips (e.g. 1153, 1155, 1158, 1189-1190, 1202,
// 1212, 1216, 1221, 1230, 1234, 1246, 1250), so the declarations of MD,
// addr, vftbl_address, the per-argument move construction, and the operand
// size arguments of several instructions are not visible. Confirm against
// the repository source before editing.
1150 void X86_64LoweringVisitor::visit(INVOKEInst *I, bool copyOperands) {
1151  assert(I);
1152  Type::TypeID type = I->get_type();
1154  MachineMethodDescriptor MMD(MD);
1156 
1157  // operands for the call
1159  MachineOperand *result = &NoOperand;
1160 
1161  // get return value
1162  switch (type) {
1163  case Type::IntTypeID:
1164  case Type::LongTypeID:
1165  case Type::ReferenceTypeID:
1166  result = new NativeRegister(type,&RAX);
1167  break;
1168  case Type::FloatTypeID:
1169  case Type::DoubleTypeID:
1170  result = new NativeRegister(type,&XMM0);
1171  break;
1172  case Type::VoidTypeID:
1173  break;
1174  default:
1175  ABORT_MSG("x86_64 Lowering not supported",
1176  "Inst: " << I << " type: " << type);
1177  }
1178 
1179  // create call
1180  MachineInstruction* call = new CallInst(SrcOp(addr),DstOp(result),I->op_size());
1181  // move values to parameters
1182  int arg_counter = 0;
1183  for (std::size_t i = 0; i < I->op_size(); ++i ) {
1184  MachineOperand *arg_dst = MMD[i];
1185  if (arg_dst->is_StackSlot()) {
1186  arg_dst = SSM->create_argument_slot(arg_dst->get_type(), arg_counter++)
;
1187  }
1188 
1191  arg_dst
1192  );
1193  get_current()->push_back(mov);
1194  // set call operand
1195  call->set_operand(i+1,arg_dst);
1196  }
1197  // spill caller saved
1198 
// INVOKESTATIC/INVOKESPECIAL: callee address is known statically, load it
// as an immediate into `addr`.
1199  if (I->to_INVOKESTATICInst() || I->to_INVOKESPECIALInst()) {
1200  methodinfo* callee = I->get_fmiref()->p.method;
1201  Immediate *method_address = new Immediate(reinterpret_cast<s8>(callee->code->entrypoint),
1203  MachineInstruction *mov = get_Backend()->create_Move(method_address, addr);
1204  get_current()->push_back(mov);
// INVOKEVIRTUAL: load the vftbl pointer from the receiver, then the method
// entry at its vftbl index.
1205  } else if (I->to_INVOKEVIRTUALInst()) {
1206 
1207  // Implicit null-checks are handled via deoptimization.
1209 
1210  methodinfo* callee = I->get_fmiref()->p.method;
1211  int32_t s1 = OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * callee->vftblindex;
1213  MachineOperand *receiver = get_op(I->get_operand(0)->to_Instruction());
1214  MachineOperand *vftbl_offset = new X86_64ModRMOperand(Type::ReferenceTypeID, BaseOp(receiver), OFFSET(java_object_t, vftbl));
1215  MachineInstruction *load_vftbl_address = new MovInst(SrcOp(vftbl_offset), DstOp(vftbl_address),
1217  get_current()->push_back(load_vftbl_address);
1218 
1219  MachineOperand *method_offset = new X86_64ModRMOperand(Type::ReferenceTypeID, BaseOp(vftbl_address), s1);
1220  MachineInstruction *load_method_address = new MovInst(SrcOp(method_offset), DstOp(addr),
1222  get_current()->push_back(load_method_address);
// INVOKEINTERFACE: one extra indirection through the interface table
// (note s1 is negative: interfacetable grows downwards from vftbl_t).
1223  } else if (I->to_INVOKEINTERFACEInst()) {
1224 
1225  // Implicit null-checks are handled via deoptimization.
1227 
1228  methodinfo* callee = I->get_fmiref()->p.method;
1229  int32_t s1 = OFFSET(vftbl_t, interfacetable[0]) - sizeof(methodptr) * callee->clazz->index;
1231  MachineOperand *receiver = get_op(I->get_operand(0)->to_Instruction());
1232  MachineOperand *vftbl_offset = new X86_64ModRMOperand(Type::ReferenceTypeID, BaseOp(receiver), OFFSET(java_object_t, vftbl));
1233  MachineInstruction *load_vftbl_address = new MovInst(SrcOp(vftbl_offset), DstOp(vftbl_address),
1235  get_current()->push_back(load_vftbl_address);
1236 
1237  VirtualRegister *interface_address = new VirtualRegister(Type::ReferenceTypeID);
1238  MachineOperand *interface_offset = new X86_64ModRMOperand(Type::ReferenceTypeID, BaseOp(vftbl_address), s1);
1239  MachineInstruction *load_interface_address = new MovInst(SrcOp(interface_offset),
1240  DstOp(interface_address), GPInstruction::OS_64);
1241  get_current()->push_back(load_interface_address);
1242 
1243  int32_t s2 = sizeof(methodptr) * (callee - callee->clazz->methods);
1244  MachineOperand *method_offset = new X86_64ModRMOperand(Type::ReferenceTypeID, BaseOp(interface_address), s2);
1245  MachineInstruction *load_method_address = new MovInst(SrcOp(method_offset), DstOp(addr),
1247  get_current()->push_back(load_method_address);
// BUILTIN: address of the builtin's implementing C function, again as an
// immediate.
1248  } else if (I->to_BUILTINInst()) {
1249  Immediate *method_address = new Immediate(reinterpret_cast<s8>(I->to_BUILTINInst()->get_address()),
1251  MachineInstruction *mov = get_Backend()->create_Move(method_address, addr);
1252  get_current()->push_back(mov);
1253  }
1254 
1255  // add call
1256  get_current()->push_back(call);
1257 
1258  // get result
// Copy the ABI result register into a virtual register so later passes see
// a normal SSA-style operand rather than a fixed machine register.
1259  if (result != &NoOperand) {
1260  MachineOperand *dst = new VirtualRegister(type);
1261  MachineInstruction *reg = get_Backend()->create_Move(result, dst);
1262  get_current()->push_back(reg);
1263  set_op(I,reg->get_result().op);
1264  }
1265 }
1266 
// Delegates to the generic INVOKEInst lowering above.
// NOTE(review): the signature line (1267) was dropped by the doc extraction;
// by source order this is presumably visit(INVOKESTATICInst*, bool) — confirm.
1268  visit(static_cast<INVOKEInst*>(I), copyOperands);
1269 }
1270 
// Delegates to the generic INVOKEInst lowering above.
// NOTE(review): the signature line (1271) was dropped by the doc extraction;
// by source order this is presumably visit(INVOKESPECIALInst*, bool) — confirm.
1272  visit(static_cast<INVOKEInst*>(I), copyOperands);
1273 }
1274 
// Delegates to the generic INVOKEInst lowering above.
// NOTE(review): the signature line (1275) was dropped by the doc extraction;
// by source order this is presumably visit(INVOKEVIRTUALInst*, bool) — confirm.
1276  visit(static_cast<INVOKEInst*>(I), copyOperands);
1277 }
1278 
// Delegates to the generic INVOKEInst lowering above.
// NOTE(review): the signature line (1279) was dropped by the doc extraction;
// by source order this is presumably visit(INVOKEINTERFACEInst*, bool) — confirm.
1280  visit(static_cast<INVOKEInst*>(I), copyOperands);
1281 }
1282 
// Builtins are lowered exactly like ordinary invocations; the BUILTIN branch
// inside visit(INVOKEInst*) loads the builtin's function address.
1283 void X86_64LoweringVisitor::visit(BUILTINInst *I, bool copyOperands) {
1284  visit(static_cast<INVOKEInst*>(I), copyOperands);
1285 }
1286 
// Lowers a field read: one typed move from [objectref + field offset]
// (ModRM addressing) into a fresh virtual register that becomes the
// instruction's result operand.
// NOTE(review): extraction dropped line 1291; presumably blank or a
// deoptimization-related statement — confirm against the repository source.
1287 void X86_64LoweringVisitor::visit(GETFIELDInst *I, bool copyOperands) {
1288  assert(I);
1289 
1290  // Implicit null-checks are handled via deoptimization.
1292 
1293  MachineOperand* objectref = get_op(I->get_operand(0)->to_Instruction());
1294  MachineOperand *field_address = new X86_64ModRMOperand(I->get_type(),
1295  BaseOp(objectref), I->get_field()->offset);
1296  MachineOperand *vreg = new VirtualRegister(I->get_type());
1297  MachineInstruction *read_field = get_Backend()->create_Move(field_address, vreg);
1298  get_current()->push_back(read_field);
1299  set_op(I, read_field->get_result().op);
1300 }
1301 
// Lowers a field write: one typed move of operand(1) into
// [objectref + field offset]. The ModRM operand's type is taken from the
// value being stored.
// NOTE(review): extraction dropped line 1306 (see GETFIELD counterpart).
1302 void X86_64LoweringVisitor::visit(PUTFIELDInst *I, bool copyOperands) {
1303  assert(I);
1304 
1305  // Implicit null-checks are handled via deoptimization.
1307 
1308  MachineOperand *objectref = get_op(I->get_operand(0)->to_Instruction());
1309  MachineOperand *value = get_op(I->get_operand(1)->to_Instruction());
1310  MachineOperand *field_address = new X86_64ModRMOperand(value->get_type(),
1311  BaseOp(objectref), I->get_field()->offset);
1312  MachineInstruction *write_field = get_Backend()->create_Move(value, field_address);
1313  get_current()->push_back(write_field);
1314  set_op(I, write_field->get_result().op);
1315 }
1316 
// Lowers a static field read: first moves the field's absolute address
// (an immediate) into a register, then loads through it with a ModRM
// operand — a workaround while loads from immediate addresses are
// unsupported (see TODO below).
// NOTE(review): extraction dropped lines 1321 and 1324 — the immediate's
// type argument and the declaration of `field_address` are not visible.
1317 void X86_64LoweringVisitor::visit(GETSTATICInst *I, bool copyOperands) {
1318  assert(I);
1319 
1320  Immediate *field_address_imm = new Immediate(reinterpret_cast<s8>(I->get_field()->value),
1322 
1323  // TODO Remove this as soon as loads from immediate addresses are supported.
1325  MachineInstruction *load_field_address = get_Backend()->create_Move(field_address_imm,
1326  field_address);
1327  get_current()->push_back(load_field_address);
1328 
1329  MachineOperand *modrm = new X86_64ModRMOperand(I->get_type(), BaseOp(field_address));
1330  MachineOperand *vreg = new VirtualRegister(I->get_type());
1331  MachineInstruction *read_field = get_Backend()->create_Move(modrm, vreg);
1332  get_current()->push_back(read_field);
1333  set_op(I, read_field->get_result().op);
1334 }
1335 
// Lowers a static field write: mirror image of GETSTATIC — materialize the
// field address in a register, then store operand(0) through it.
// NOTE(review): extraction dropped lines 1340 and 1343 — the immediate's
// type argument and the declaration of `field_address` are not visible.
1336 void X86_64LoweringVisitor::visit(PUTSTATICInst *I, bool copyOperands) {
1337  assert(I);
1338 
1339  Immediate *field_address_imm = new Immediate(reinterpret_cast<s8>(I->get_field()->value),
1341 
1342  // TODO Remove this as soon as stores to immediate addresses are supported.
1344  MachineInstruction *load_field_address = get_Backend()->create_Move(field_address_imm,
1345  field_address);
1346  get_current()->push_back(load_field_address);
1347 
1348  MachineOperand *value = get_op(I->get_operand(0)->to_Instruction());
1349  MachineOperand *modrm = new X86_64ModRMOperand(value->get_type(), BaseOp(field_address));
1350  MachineInstruction *write_field = get_Backend()->create_Move(value, modrm);
1351  get_current()->push_back(write_field);
1352  set_op(I, write_field->get_result().op);
1353 }
1354 
// Lowers a lookup-switch as a linear compare-and-branch chain: for each
// match value, emit CMP(src, imm) + JE(target); a fresh fall-through block
// becomes the current block for the next comparison; the final unconditional
// jump goes to the default successor.
// NOTE(review): the signature line (1355) and lines 1360-1361 (the match
// iterator loop header and the successor iterator `s` declaration) were
// dropped by the doc extraction — presumably visit(LOOKUPSWITCHInst*, bool);
// confirm against the repository source.
1356  assert(I);
1357  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
1358  Type::TypeID type = I->get_type();
1359 
1362  e = I->match_end(); i != e; ++i) {
1363  // create compare
1364  CmpInst *cmp = new CmpInst(
1365  Src2Op(new Immediate(*i,Type::IntType())),
1366  Src1Op(src_op),
1368  get_current()->push_back(cmp);
1369  // create new block
1370  MachineBasicBlock *then_block = get(s->get());
1371  MachineBasicBlock *else_block = new_block();
1372  assert(else_block);
1373  else_block->insert_pred(get_current());
1374  else_block->push_front(new MachineLabelInst());
1375  // create cond jump
1376  MachineInstruction *cjmp = new CondJumpInst(Cond::E, then_block, else_block);
1377  get_current()->push_back(cjmp);
1378  // set current
1379  set_current(else_block);
1380  ++s;
1381  }
1382 
1383  // default
1384  MachineInstruction *jmp = new JumpInst(get(s->get()));
1385  get_current()->push_back(jmp);
1386  assert(++s == I->succ_end());
1387 
1388  set_op(I,jmp->get_result().op);
1389 }
1390 
// Lowers a table-switch: normalizes the selector by subtracting `low`,
// range-checks against the (adjusted) `high`, branches to the out-of-range
// successor on Cond::G, and otherwise performs an indirect jump through a
// jump table placed in the data segment.
// NOTE(review): this lowering is known-broken — the assert_msg(0, ...) below
// makes it abort unconditionally, and the data-segment offset is still a
// TODO. The signature line (1391) and several interior lines (1408, 1417,
// 1424, 1427, 1433) were dropped by the doc extraction — presumably
// visit(TABLESWITCHInst*, bool); confirm against the repository source.
1392  assert_msg(0 , "Fix CondJump");
1393  assert(I);
1394  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
1395  Type::TypeID type = I->get_type();
1396  VirtualRegister *src = new VirtualRegister(type);
1397  MachineInstruction *mov = get_Backend()->create_Move(src_op, src);
1398  get_current()->push_back(mov);
1399 
1400  s4 low = I->get_low();
1401  s4 high = I->get_high();
1402 
1403  // adjust offset
1404  if (low != 0) {
1405  SubInst *sub = new SubInst(
1406  Src2Op(new Immediate(low,Type::IntType())),
1407  DstSrc1Op(src),
1409  );
1410  get_current()->push_back(sub);
1411  high -= low;
1412  }
1413  // check range
1414  CmpInst *cmp = new CmpInst(
1415  Src2Op(new Immediate(high,Type::IntType())),
1416  Src1Op(src),
1418  MachineInstruction *cjmp = new CondJumpInst(Cond::G, get(I->succ_front().get()),get((++I->succ_begin())->get()));
1419  get_current()->push_back(cmp);
1420  get_current()->push_back(cjmp);
1421 
1422  // TODO load data segment and jump
1423  // load address
1425  DataFragment data = DS.get_Ref(sizeof(void*) * (high - low + 1));
1426  DataSegment::IdxTy idx = data.get_begin();
1428  WARNING_MSG("TODO","add offset");
1429  MovDSEGInst *dmov = new MovDSEGInst(DstOp(addr),idx);
1430  get_current()->push_back(dmov);
1431  IndirectJumpInst *jmp = new IndirectJumpInst(SrcOp(addr));
1432  // adding targets
1434  e = I->succ_end(); i != e; ++i) {
1435  jmp->add_target(get(i->get()));
1436  }
1437  get_current()->push_back(jmp);
1438  // assert(0 && "load data segment and jump"));
1439  // load table entry
1440  set_op(I,cjmp->get_result().op);
1441 }
1442 
// Intentionally empty: no machine code is emitted for explicit null checks.
// Presumably they are handled implicitly (other visitors in this file note
// "Implicit null-checks are handled via deoptimization") — confirm.
1443 void X86_64LoweringVisitor::visit(CHECKNULLInst *I, bool copyOperands) {
1444 
1445 }
1446 
// Lowers a speculative assumption check: records the deoptimization source
// state, compares the guard operand against 1, and emits a conditional
// TRAP_DEOPTIMIZE if the assumption does not hold (Cond::NE).
// NOTE(review): extraction dropped lines 1452, 1463 and 1467 — the creation
// of `MI` (presumably a MachineReplacementPointInst variant), the CmpInst
// operand-size argument, and the declaration of `methodptr` are not visible.
1447 void X86_64LoweringVisitor::visit(AssumptionInst *I, bool copyOperands) {
1448  assert(I);
1449 
1450  SourceStateInst *source_state = I->get_source_state();
1451  assert(source_state);
1453  source_state->get_source_location(), source_state->op_size());
1454  lower_source_state_dependencies(MI, source_state);
1455  get_current()->push_back(MI);
1456 
1457  // compare with `1`
1458  MachineOperand* cond_op = get_op(I->get_operand(0)->to_Instruction());
1459  Immediate *imm = new Immediate(1,Type::IntType());
1460  CmpInst *cmp = new CmpInst(
1461  Src2Op(imm),
1462  Src1Op(cond_op),
1464  get_current()->push_back(cmp);
1465 
1466  // deoptimize
1468  MachineInstruction *trap = new CondTrapInst(Cond::NE, TRAP_DEOPTIMIZE, SrcOp(methodptr));
1469  get_current()->push_back(trap);
1470 }
1471 
// Lowers an unconditional deoptimization: records the source state needed to
// reconstruct the baseline frame, then emits an unconditional
// TRAP_DEOPTIMIZE.
// NOTE(review): extraction dropped lines 1477 and 1482 — the creation of
// `MI` and the declaration of `methodptr` are not visible.
1472 void X86_64LoweringVisitor::visit(DeoptimizeInst *I, bool copyOperands) {
1473  assert(I);
1474 
1475  SourceStateInst *source_state = I->get_source_state();
1476  assert(source_state);
1478  source_state->get_source_location(), source_state->op_size());
1479  lower_source_state_dependencies(MI, source_state);
1480  get_current()->push_back(MI);
1481 
1483  MachineInstruction *deoptimize_trap = new TrapInst(TRAP_DEOPTIMIZE, SrcOp(methodptr));
1484  get_current()->push_back(deoptimize_trap);
1485 }
1486 
// Pattern-based lowering for fused instruction trees selected by the
// instruction-selection rule `ruleId`: constant folding (AddImmImm),
// reg+imm ALU forms, LEA-based address arithmetic (Base/Index/Multiplier/
// Displacement combinations), and array load/store with scaled ModRM
// addressing.
// NOTE(review): the signature line (1487) was dropped by the doc extraction;
// per the Doxygen index it is
//   virtual void lowerComplex(Instruction *I, int ruleId)
// — confirm. Several interior lines are also missing (e.g. 1556, 1588, 1621,
// 1704, 1734, 1769, 1804, 1890): operand-size arguments and the first line
// of the X86_64ModRMOperand constructor calls in the LEA cases, and the
// declaration of `src` in the AStore case.
1488 
1489  switch(ruleId){
// Fold two constant operands at compile time and load the sum as an
// immediate into a fresh virtual register.
1490  case AddImmImm:
1491  {
1492  assert(I);
1493  Type::TypeID type = I->get_type();
1494  CONSTInst* const_left = I->get_operand(0)->to_Instruction()->to_CONSTInst();
1495  CONSTInst* const_right = I->get_operand(1)->to_Instruction()->to_CONSTInst();
1496 
1497  Immediate *imm = NULL;
1498  switch (type) {
1499  case Type::IntTypeID:
1500  {
1501  s4 val = const_left->get_Int() + const_right->get_Int();
1502  imm = new Immediate(val, Type::IntType());
1503  break;
1504  }
1505  case Type::LongTypeID:
1506  {
1507  s8 val = const_left->get_Long() + const_right->get_Long();
1508  imm = new Immediate(val, Type::LongType());
1509  break;
1510  }
1511  case Type::FloatTypeID:
1512  {
1513  float val = const_left->get_Float() + const_right->get_Float();
1514  imm = new Immediate(val, Type::FloatType());
1515  break;
1516  }
1517  case Type::DoubleTypeID:
1518  {
1519  double val = const_left->get_Double() + const_right->get_Double();
1520  imm = new Immediate(val, Type::DoubleType());
1521  break;
1522  }
1523  default:
1524  assert(0);
1525  break;
1526  }
1527 
1528  VirtualRegister *reg = new VirtualRegister(I->get_type());
1529  MachineInstruction *move = get_Backend()->create_Move(imm,reg);
1530  get_current()->push_back(move);
1531  set_op(I,move->get_result().op);
1532  break;
1533  }
1534  // all immediates should be second operand, see ssa construction pass
// Two-address form: copy src into dst, then ADD the immediate in place.
1535  case AddRegImm:
1536  { // todo: copyOperands?!?
1537  // todo: extend pattern to not rely on data type, instead check if const fits into imm encoding
1538  assert(I);
1539  Type::TypeID type = I->get_type();
1540 
1541  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
1542  Immediate* const_op = new Immediate(I->get_operand(1)->to_Instruction()->to_CONSTInst());
1543 
1544  VirtualRegister *dst = new VirtualRegister(type);
1545  MachineInstruction *mov = get_Backend()->create_Move(src_op,dst);
1546 
1547  MachineInstruction *alu = NULL;
1548 
1549  switch (type) {
1550  case Type::ByteTypeID:
1551  case Type::IntTypeID:
1552  case Type::LongTypeID:
1553  alu = new AddInst(
1554  Src2Op(const_op),
1555  DstSrc1Op(dst),
1557  break;
1558  default:
1559  ABORT_MSG("x86_64: AddImm Lowering not supported",
1560  "Inst: " << I << " type: " << type);
1561  }
1562  get_current()->push_back(mov);
1563  get_current()->push_back(alu);
1564  set_op(I,alu->get_result().op);
1565 
1566  break;
1567 
1568  }
// Same two-address pattern as AddRegImm, with SUB.
1569  case SubRegImm:
1570  {
1571  assert(I);
1572  Type::TypeID type = I->get_type();
1573 
1574  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
1575  Immediate* const_op = new Immediate(I->get_operand(1)->to_Instruction()->to_CONSTInst());
1576 
1577  VirtualRegister *dst = new VirtualRegister(type);
1578  MachineInstruction *mov = get_Backend()->create_Move(src_op,dst);
1579  MachineInstruction *alu = NULL;
1580 
1581  switch (type) {
1582  case Type::ByteTypeID:
1583  case Type::IntTypeID:
1584  case Type::LongTypeID:
1585  alu = new SubInst(
1586  Src2Op(const_op),
1587  DstSrc1Op(dst),
1589  break;
1590  default:
1591  ABORT_MSG("x86_64: SubRegImm Lowering not supported",
1592  "Inst: " << I << " type: " << type);
1593  }
1594  get_current()->push_back(mov);
1595  get_current()->push_back(alu);
1596  set_op(I,alu->get_result().op);
1597  break;
1598  }
// Three-operand IMUL with immediate — no preparatory move needed.
1599  case MulRegImm:
1600  {
1601  // todo: copyOperands?!?
1602  // todo: 3operand version!
1603  assert(I);
1604  Type::TypeID type = I->get_type();
1605 
1606  MachineOperand* src_op = get_op(I->get_operand(0)->to_Instruction());
1607  Immediate* const_op = new Immediate(I->get_operand(1)->to_Instruction()->to_CONSTInst());
1608 
1609  VirtualRegister *dst = new VirtualRegister(type);
1610 
1611  MachineInstruction *alu = NULL;
1612 
1613  switch (type) {
1614  case Type::ByteTypeID:
1615  case Type::IntTypeID:
1616  case Type::LongTypeID:
1617  alu = new IMulImmInst(
1618  Src1Op(src_op),
1619  Src2Op(const_op),
1620  DstOp(dst),
1622  break;
1623  default:
1624  ABORT_MSG("x86_64: MulImm Lowering not supported",
1625  "Inst: " << I << " type: " << type);
1626  }
1627  get_current()->push_back(alu);
1628  set_op(I,alu->get_result().op);
1629 
1630  break;
1631  }
1632  // LEA
// (base + index) + disp folded into one LEA.
1633  case BaseIndexDisplacement:
1634  {
1635  /*
1636  ADDInstID
1637  ADDInstID
1638  stm, // base
1639  stm // index
1640  CONSTInstID // disp
1641  */
1642  assert(I);
1643  Type::TypeID type = I->get_type();
1644 
1645  Instruction* nested_add = I->get_operand(0)->to_Instruction();
1646  MachineOperand* base = get_op(nested_add->get_operand(0)->to_Instruction());
1647  MachineOperand* index = get_op(nested_add->get_operand(1)->to_Instruction());
1648 
1649  CONSTInst* displacement = I->get_operand(1)->to_Instruction()->to_CONSTInst();
1650  VirtualRegister *dst = new VirtualRegister(type);
1651 
1652  MachineOperand *modrm = new X86_64ModRMOperand(Type::VoidTypeID,BaseOp(base),IndexOp(index),displacement->get_value());
1653  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1654 
1655  get_current()->push_back(lea);
1656  set_op(I,lea->get_result().op);
1657  break;
1658  }
// base + (index + disp): mirrored nesting of the same pattern.
1659  case BaseIndexDisplacement2:
1660  {
1661  /*
1662  ADDInstID
1663  stm
1664  ADDInstID
1665  stm
1666  CONSTInstID
1667  */
1668  assert(I);
1669  Type::TypeID type = I->get_type();
1670  MachineOperand* base = get_op(I->get_operand(0)->to_Instruction());
1671 
1672  Instruction* nested_add = I->get_operand(1)->to_Instruction();
1673  MachineOperand* index = get_op(nested_add->get_operand(0)->to_Instruction());
1674 
1675  CONSTInst* displacement = nested_add->get_operand(1)->to_Instruction()->to_CONSTInst();
1676  VirtualRegister *dst = new VirtualRegister(type);
1677 
1678  MachineOperand *modrm = new X86_64ModRMOperand(Type::VoidTypeID,BaseOp(base),IndexOp(index),displacement->get_value());
1679  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1680 
1681  get_current()->push_back(lea);
1682  set_op(I,lea->get_result().op);
1683  break;
1684  }
// base + index*scale via LEA; the multiplier must map to a valid SIB scale.
1685  case BaseIndexMultiplier:
1686  {
1687  /*
1688  ADDInstID
1689  stm, // base
1690  MULInstID
1691  stm, // index
1692  CONSTInstID // multiplier
1693  */
1694  assert(I);
1695  Type::TypeID type = I->get_type();
1696 
1697  MachineOperand* base = get_op(I->get_operand(0)->to_Instruction());
1698 
1699  Instruction* nested_mul = I->get_operand(1)->to_Instruction();
1700  MachineOperand* index = get_op(nested_mul->get_operand(0)->to_Instruction());
1701  CONSTInst* multiplier = nested_mul->get_operand(1)->to_Instruction()->to_CONSTInst();
1702 
1703  VirtualRegister *dst = new VirtualRegister(type);
1705  ,BaseOp(base)
1706  ,IndexOp(index)
1707  ,X86_64ModRMOperand::get_scale(multiplier->get_Int())
1708  );
1709  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1710 
1711  get_current()->push_back(lea);
1712  set_op(I,lea->get_result().op);
1713  break;
1714  }
// index*scale + base: operand order of the ADD is swapped.
1715  case BaseIndexMultiplier2:
1716  {
1717  /*
1718  ADDInstID
1719  MULInstID
1720  stm, // index
1721  CONSTInstID // multiplier
1722  stm // base
1723  */
1724  assert(I);
1725  Type::TypeID type = I->get_type();
1726 
1727  MachineOperand* base = get_op(I->get_operand(1)->to_Instruction());
1728 
1729  Instruction* nested_mul = I->get_operand(0)->to_Instruction();
1730  MachineOperand* index = get_op(nested_mul->get_operand(0)->to_Instruction());
1731  CONSTInst* multiplier = nested_mul->get_operand(1)->to_Instruction()->to_CONSTInst();
1732 
1733  VirtualRegister *dst = new VirtualRegister(type);
1735  ,BaseOp(base)
1736  ,IndexOp(index)
1737  ,X86_64ModRMOperand::get_scale(multiplier->get_Int())
1738  );
1739  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1740 
1741  get_current()->push_back(lea);
1742  set_op(I,lea->get_result().op);
1743  break;
1744  }
// (base + index*scale) + disp: the full SIB addressing form in one LEA.
1745  case BaseIndexMultiplierDisplacement:
1746  {
1747  /*
1748  ADDInstID
1749  ADDInstID
1750  stm, // base
1751  MULInstID
1752  stm, // index
1753  CONSTInstID // multiplier
1754  CONSTInstID // displacement
1755  */
1756  assert(I);
1757  Type::TypeID type = I->get_type();
1758 
1759  Instruction* bim_root = I->get_operand(0)->to_Instruction();
1760  MachineOperand* base = get_op(bim_root->get_operand(0)->to_Instruction());
1761 
1762  Instruction* nested_mul = bim_root->get_operand(1)->to_Instruction();
1763  MachineOperand* index = get_op(nested_mul->get_operand(0)->to_Instruction());
1764  CONSTInst* multiplier = nested_mul->get_operand(1)->to_Instruction()->to_CONSTInst();
1765 
1766  CONSTInst* displacement = I->get_operand(1)->to_Instruction()->to_CONSTInst();
1767 
1768  VirtualRegister *dst = new VirtualRegister(type);
1770  ,BaseOp(base)
1771  ,IndexOp(index)
1772  ,X86_64ModRMOperand::get_scale(multiplier->get_Int())
1773  ,displacement->get_value()
1774  );
1775  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1776 
1777  get_current()->push_back(lea);
1778  set_op(I,lea->get_result().op);
1779  break;
1780  }
// base + (index*scale + disp): same result, mirrored nesting.
1781  case BaseIndexMultiplierDisplacement2:
1782  {
1783  /*
1784  ADDInstID
1785  stm, // base
1786  ADDInstID
1787  MULInstID
1788  stm, // index
1789  CONSTInstID // multiplier
1790  CONSTInstID // displacement
1791  */
1792  assert(I);
1793  Type::TypeID type = I->get_type();
1794 
1795  MachineOperand* base = get_op(I->get_operand(0)->to_Instruction());
1796 
1797  Instruction* mul_add = I->get_operand(1)->to_Instruction();
1798  MachineOperand* index = get_op(mul_add->get_operand(0)->to_Instruction()->get_operand(0)->to_Instruction());
1799  CONSTInst* multiplier = mul_add->get_operand(0)->to_Instruction()->get_operand(1)->to_Instruction()->to_CONSTInst();
1800 
1801  CONSTInst* displacement = mul_add->get_operand(1)->to_Instruction()->to_CONSTInst();
1802 
1803  VirtualRegister *dst = new VirtualRegister(type);
1805  ,BaseOp(base)
1806  ,IndexOp(index)
1807  ,X86_64ModRMOperand::get_scale(multiplier->get_Int())
1808  ,displacement->get_value()
1809  );
1810  MachineInstruction* lea = new LEAInst(DstOp(dst), get_OperandSize_from_Type(type), SrcOp(modrm));
1811 
1812  get_current()->push_back(lea);
1813  set_op(I,lea->get_result().op);
1814  break;
1815  }
// Array element load: [ref + index*elem + header-offset] in one move.
// The offset is the array-header size taken from the matching java_*array_t.
1816  case ALoad:
1817  {
1818  /*
1819  ALOADInstID
1820  AREFInstID
1821  stm,
1822  stm
1823  */
1824 
1825  assert(I);
1826  Instruction *ref = I->get_operand(0)->to_Instruction();
1827  assert(ref);
1828  MachineOperand* src_ref = get_op(ref->get_operand(0)->to_Instruction());
1829  MachineOperand* src_index = get_op(ref->get_operand(1)->to_Instruction());
1830  assert(src_ref->get_type() == Type::ReferenceTypeID);
1831  assert(src_index->get_type() == Type::IntTypeID);
1832 
1833  Type::TypeID type = ref->get_type();
1834 
1835  VirtualRegister *dst = new VirtualRegister(type);
1836 
1837  s4 offset;
1838  switch (type) {
1839  case Type::ByteTypeID:
1840  offset = OFFSET(java_bytearray_t, data[0]);
1841  break;
1842  case Type::ShortTypeID:
1843  offset = OFFSET(java_shortarray_t, data[0]);
1844  break;
1845  case Type::IntTypeID:
1846  offset = OFFSET(java_intarray_t, data[0]);
1847  break;
1848  case Type::LongTypeID:
1849  offset = OFFSET(java_longarray_t, data[0]);
1850  break;
1851  case Type::FloatTypeID:
1852  offset = OFFSET(java_floatarray_t, data[0]);
1853  break;
1854  case Type::DoubleTypeID:
1855  offset = OFFSET(java_doublearray_t, data[0]);
1856  break;
1857  case Type::ReferenceTypeID:
1858  offset = OFFSET(java_objectarray_t, data[0]);
1859  break;
1860  default:
1861  ABORT_MSG("x86_64 Lowering not supported",
1862  "Inst: " << I << " type: " << type);
1863  offset = 0;
1864  }
1865 
1866  // create modrm source operand
1867  MachineOperand *modrm = new X86_64ModRMOperand(type,BaseOp(src_ref),IndexOp(src_index),type,offset);
1868  MachineInstruction *move = get_Backend()->create_Move(modrm, dst);
1869  get_current()->push_back(move);
1870  set_op(I,move->get_result().op);
1871  break;
1872  }
// Array element store from a register value (reference stores still TODO —
// they would need a write barrier / arraystore check).
1873  case AStore:
1874  {
1875  /*
1876  ASTOREInstID
1877  AREFInstID,
1878  stm,
1879  stm
1880  stm
1881  */
1882 
1883  assert(I);
1884  Instruction *ref = I->get_operand(0)->to_Instruction();
1885  assert(ref);
1886  MachineOperand* dst_ref = get_op(ref->get_operand(0)->to_Instruction());
1887  MachineOperand* dst_index = get_op(ref->get_operand(1)->to_Instruction());
1888  assert(dst_ref->get_type() == Type::ReferenceTypeID);
1889  assert(dst_index->get_type() == Type::IntTypeID);
1891 
1892  Type::TypeID type = ref->get_type();
1893 
1894  s4 offset;
1895  switch (type) {
1896  case Type::ByteTypeID:
1897  offset = OFFSET(java_bytearray_t, data[0]);
1898  break;
1899  case Type::ShortTypeID:
1900  offset = OFFSET(java_shortarray_t, data[0]);
1901  break;
1902  case Type::IntTypeID:
1903  offset = OFFSET(java_intarray_t, data[0]);
1904  break;
1905  case Type::LongTypeID:
1906  offset = OFFSET(java_longarray_t, data[0]);
1907  break;
1908  case Type::FloatTypeID:
1909  offset = OFFSET(java_floatarray_t, data[0]);
1910  break;
1911  case Type::DoubleTypeID:
1912  offset = OFFSET(java_doublearray_t, data[0]);
1913  break;
1914  case Type::ReferenceTypeID:
1915  // TODO: implement me
1916  default:
1917  ABORT_MSG("x86_64 Lowering not supported",
1918  "Inst: " << I << " type: " << type);
1919  offset = 0;
1920  }
1921 
1922  // create modrm source operand
1923  MachineOperand *modrm = new X86_64ModRMOperand(type,BaseOp(dst_ref),IndexOp(dst_index),type,offset);
1924  MachineInstruction *move = get_Backend()->create_Move(src, modrm);
1925  get_current()->push_back(move);
1926  set_op(I,move->get_result().op);
1927  break;
1928  }
// Array element store of a constant value, stored directly as an immediate.
1929  case AStoreImm:
1930  {
1931  /*
1932  ASTOREInstID
1933  AREFInstID,
1934  stm,
1935  stm
1936  CONSTInstID
1937  */
1938 
1939  assert(I);
1940  Instruction *ref = I->get_operand(0)->to_Instruction();
1941  assert(ref);
1942  MachineOperand* dst_ref = get_op(ref->get_operand(0)->to_Instruction());
1943  MachineOperand* dst_index = get_op(ref->get_operand(1)->to_Instruction());
1944  assert(dst_ref->get_type() == Type::ReferenceTypeID);
1945  assert(dst_index->get_type() == Type::IntTypeID);
1946  Immediate* imm = new Immediate(I->get_operand(1)->to_Instruction()->to_CONSTInst());
1947 
1948  Type::TypeID type = ref->get_type();
1949 
1950  s4 offset;
1951  switch (type) {
1952  case Type::ByteTypeID:
1953  offset = OFFSET(java_bytearray_t, data[0]);
1954  break;
1955  case Type::ShortTypeID:
1956  offset = OFFSET(java_shortarray_t, data[0]);
1957  break;
1958  case Type::IntTypeID:
1959  offset = OFFSET(java_intarray_t, data[0]);
1960  break;
1961  case Type::LongTypeID:
1962  offset = OFFSET(java_longarray_t, data[0]);
1963  break;
1964  case Type::FloatTypeID:
1965  offset = OFFSET(java_floatarray_t, data[0]);
1966  break;
1967  case Type::DoubleTypeID:
1968  offset = OFFSET(java_doublearray_t, data[0]);
1969  break;
1970  case Type::ReferenceTypeID:
1971  // TODO: implement me
1972  default:
1973  ABORT_MSG("x86_64 Lowering not supported",
1974  "Inst: " << I << " type: " << type);
1975  offset = 0;
1976  }
1977 
1978  // create modrm source operand
1979  MachineOperand *modrm = new X86_64ModRMOperand(type,BaseOp(dst_ref),IndexOp(dst_index),type,offset);
1980  MachineInstruction *move = get_Backend()->create_Move(imm, modrm);
1981  get_current()->push_back(move);
1982  set_op(I,move->get_result().op);
1983  break;
1984  }
1985  default:
1986  ABORT_MSG("Rule not supported", "Rule " << ruleId << " is not supported by method lowerComplex!");
1987 
1988  }
1989 }
1990 
// Prepares source/destination operands for a two-address ALU lowering.
// When copying is not requested, reuses an existing virtual register as the
// destination: src_op1 directly, or src_op2 (swapping the operands) when the
// operation is commutable. Otherwise allocates a fresh virtual register and
// emits a move of src_op1 into it.
// NOTE(review): the signature's first line (1991) was dropped by the doc
// extraction; the visible second line shows trailing parameters
// (type, copyOperands, isCommutable) — presumably a setupSrcDst-style helper
// taking src_op1/src_op2/dst by reference; confirm against the repository
// source.
1992  Type::TypeID type, bool copyOperands, bool isCommutable) {
1993 
1994  if (!copyOperands){
1995  if (src_op1->is_Register()){
1996  dst = src_op1->to_Register()->to_VirtualRegister();
1997  return;
1998  } else if (src_op2->is_Register() && isCommutable){
1999  dst = src_op2->to_Register()->to_VirtualRegister();
2000  src_op2 = src_op1;
2001  return;
2002  }
2003  }
2004  dst = new VirtualRegister(type);
2005  MachineInstruction *mov = get_Backend()->create_Move(src_op1,dst);
2006  get_current()->push_back(mov);
2007 }
2008 
2009 
// Preprocessor-disabled (#if 0) specialization that would construct the
// x86_64 register file. NOTE(review): the function-signature line (2013)
// was dropped by the doc extraction.
2010 #if 0
2011 template<>
2012 compiler2::RegisterFile*
2014  return new x86_64::RegisterFile(type);
2015 }
2016 #endif
2017 
2018 
2019 } // end namespace compiler2
2020 } // end namespace jit
2021 } // end namespace cacao
2022 
2023 
2024 /*
2025  * These are local overrides for various environment variables in Emacs.
2026  * Please do not remove this and leave it at the end of the file, where
2027  * Emacs will automagically detect them.
2028  * ---------------------------------------------------------------------
2029  * Local variables:
2030  * mode: c++
2031  * indent-tabs-mode: t
2032  * c-basic-offset: 4
2033  * tab-width: 4
2034  * End:
2035  * vim:noexpandtab:sw=4:ts=4:
2036  */
static const COND B
below (CF = 1)
Definition: X86_64Cond.hpp:81
void set_op(Instruction *I, MachineOperand *op) const
Definition: Backend.hpp:106
GPRegister RDX("RDX", 0x2, false, 0x2 *8, 8)
std::size_t index
void set_current(MachineBasicBlock *MBB)
Definition: Backend.hpp:122
static const COND E
equal (ZF = 1)
Definition: X86_64Cond.hpp:88
ManagedStackSlot * create_slot(Type::TypeID type)
Create a ManagedStackSlot.
virtual MachineInstruction * create_Move(MachineOperand *src, MachineOperand *dst) const =0
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
Subtract Scalar Double-Precision Floating-Point Values.
virtual BUILTINInst * to_BUILTINInst()
Definition: Instruction.hpp:68
virtual Instruction * to_Instruction()
Definition: Value.hpp:88
CodeFragment get_aligned_CodeFragment(std::size_t size)
get an aligned code fragment
Definition: CodeMemory.cpp:83
Simple wrapper for second operand of an x86_64 instruction.
GPInstruction::OperandSize get_OperandSize_from_Type(const Type::TypeID type)
SourceStateInst * get_source_state() const
Get the SourceStateInst that corresponds to this Instruction.
#define WARNING_MSG(EXPR_SHORT, EXPR_LONG)
Definition: logging.hpp:96
Simple wrapper for first operand of an x86_64 instruction.
virtual MachineInstruction * create_Jump(MachineBasicBlock *target) const
const MethodDescriptor & get_MethodDescriptor() const
Get the MethodDescriptor.
Definition: MethodC2.hpp:170
fieldinfo * get_field() const
Get the accessed field.
argument_type from
Write a value to a static field.
Type::TypeID get_type() const
get the value type of the instruction
Definition: Value.hpp:68
virtual CONSTInst * to_CONSTInst()
Definition: Instruction.hpp:51
Get the value of an object's field.
virtual void emit(CodeMemory *cm) const
emit machine code
Base type of instruction that perform a method invocation.
Load a value from an array.
static const COND G
greater (ZF = 0 and SF = OF)
Definition: X86_64Cond.hpp:108
A TrapInst represents a hardware trap.
Multiply Scalar Double-Precision Floating-Point Values.
A basic block of (scheduled) machine instructions.
virtual void create_frame(CodeMemory *CM, StackSlotManager *SSM) const
virtual INVOKESTATICInst * to_INVOKESTATICInst()
Definition: Instruction.hpp:72
virtual void lowerComplex(Instruction *I, int ruleId)
Transfers execution back to an unoptimized version of the method.
Get the value of a static field.
union constant_FMIref::@26 p
StackSlotManager * get_StackSlotManager()
Definition: JITData.hpp:56
Convert Dword Integer to Scalar Single-Precision FP Value.
Add Scalar Single-Precision Floating-Point Values.
Write a value to an object's field.
uint8_t u1
Definition: types.hpp:40
u1 * methodptr
Definition: global.hpp:40
Perform a bounds-check for an array-access.
void set_operand(std::size_t i, MachineOperand *op)
Divide Scalar Single-Precision Floating-Point Values.
methodinfo * method
Definition: references.hpp:101
int64_t s8
Definition: types.hpp:48
JNIEnv jthread jobject jclass jlong size
Definition: jvmti.h:387
Simple wrapper for first operand of an x86_64 instruction which is also used for the result...
Represents an explicit null-check on an object reference.
Store a value into an array.
This stores a reference to a BeginInst.
ManagedStackSlot * create_argument_slot(Type::TypeID type, u4 index)
Create a ManagedStackSlot for an invocation argument.
Subtract Scalar Single-Precision Floating-Point Values.
GPInstruction::OperandSize get_operand_size_from_Type(Type::TypeID type)
constant_FMIref * get_fmiref() const
Get information about the method to invoke.
Represents a speculative assumption that has to be checked at run-time.
SuccessorListTy::const_iterator succ_end() const
virtual void visit(LOADInst *I, bool copyOperands)
void push_back(MachineInstruction *value)
Appends the given element value to the end of the container.
Invoke an instance method with special handling.
Provides a mapping from HIR values to baseline IR variables.
Simple wrapper for the operand of an single operand x86_64 instruction.
Conditional::CondID get_condition() const
Get the kind of condition that is computed.
int32_t offset
Definition: field.hpp:66
CodeMemory * get_CodeMemory()
Definition: JITData.hpp:57
#define OFFSET(s, el)
Definition: memory.hpp:90
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
void emit_nop(CodeFragment code, int length)
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
A &quot;virtual&quot; slot that will eventually be mapped to a machine-level slot.
Multiply Scalar Single-Precision Floating-Point Values.
imm_union * value
Definition: field.hpp:67
u1 * get_address() const
Get the pointer to the function that implements the builtin functionality.
Convert Dword Integer to Scalar Double-Precision FP Value.
void push_front(MachineInstruction *value)
inserts value to the beginning
virtual VirtualRegister * to_VirtualRegister()
Instruction super class.
Definition: Instruction.hpp:75
std::size_t size() const
size of the reference
Definition: Segment.hpp:392
MIIterator i
static ScaleFactor get_scale(Type::TypeID type)
int32_t s4
Definition: types.hpp:45
static const COND L
less (SF &lt;&gt; OF)
Definition: X86_64Cond.hpp:101
SuccessorListTy::const_iterator succ_begin() const
MethodDescriptor & get_MethodDescriptor()
Get the MethodDescriptor of the method to invoke.
virtual const char * get_name() const
codeinfo * code
Definition: method.hpp:103
static const COND GE
greater or equal (SF = OF)
Definition: X86_64Cond.hpp:104
virtual MachineInstruction * create_Move(MachineOperand *src, MachineOperand *dst) const
Simple wrapper for first operand of an x86_64 instruction which is also used for the result...
SuccessorListTy::const_iterator succ_const_iterator
MIIterator e
Get the length of an array.
void setupSrcDst(MachineOperand *&src_op1, MachineOperand *&src_op2, VirtualRegister *&dst, Type::TypeID type, bool copyOperands, bool isCommutable)
Simple wrapper for destination of an x86_64 instruction.
Proxy to encode explicit and implicit successors.
MachineBasicBlock * get_current() const
Definition: Backend.hpp:121
#define I(value)
Definition: codegen.c:279
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
void lower_source_state_dependencies(MachineReplacementPointInst *MI, SourceStateInst *source_state)
Definition: Backend.cpp:72
Operands that can be directly used by the machine (register, memory, stackslot)
GPRegister R10("R10", 0x2, true, 0xa *8, 8)
static const COND NE
not equal (ZF = 0)
Definition: X86_64Cond.hpp:90
GPRegister RAX("RAX", 0x0, false, 0x0 *8, 8)
int8_t s1
Definition: types.hpp:39
const MachineOperandDesc & get_result() const
aarch64::NativeRegister NativeRegister
Definition: Target.hpp:42
int16_t s2
Definition: types.hpp:42
unsigned get_index() const
The index of the argument is represented by this LOADInst.
Segment reference.
Definition: Segment.hpp:44
static const COND A
above (CF = 0 and ZF = 0)
Definition: X86_64Cond.hpp:94
virtual INVOKESPECIALInst * to_INVOKESPECIALInst()
Definition: Instruction.hpp:71
#define assert_msg(COND, EXPR)
Definition: logging.hpp:107
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
static const COND P
parity (PF = 1)
Definition: X86_64Cond.hpp:97
SSERegister XMM0("XMM0", 0x0, false, 0x0 *16, 16)
virtual bool is_commutable() const
True if the operands of the instruction are commutable.
BeginInst * target
void insert_pred(MachineBasicBlock *value)
Appends the given element value to the list of predecessors.
MachineBasicBlock * new_block() const
Definition: Backend.cpp:140
IdxTy get_begin() const
Get the index of the first element.
Definition: Segment.hpp:343
MachineOperand * get_op(Instruction *I) const
Definition: Backend.hpp:100
#define ABORT_MSG(EXPR_SHORT, EXPR_LONG)
Definition: logging.hpp:133
void place_deoptimization_marker(SourceStateAwareInst *I)
Definition: Backend.cpp:114
JITData * get_JITData() const
Definition: Backend.hpp:52
Add Scalar Double-Precision Floating-Point Values.
Ref get_Ref(std::size_t t)
get a new reference to the segment
Definition: Segment.hpp:208
virtual INVOKEVIRTUALInst * to_INVOKEVIRTUALInst()
Definition: Instruction.hpp:70
virtual INVOKEINTERFACEInst * to_INVOKEINTERFACEInst()
Definition: Instruction.hpp:73
A LOADInst represents an argument that is passed to the current method.
static const COND LE
less or equal (ZF = 1 or SF &lt;&gt; OF)
Definition: X86_64Cond.hpp:105
const DataSegment & get_DataSegment() const
get DataSegment
Definition: CodeMemory.hpp:69
Divide Scalar Double-Precision Floating-Point Values.
u1 * entrypoint
Definition: code.hpp:83
FloatHandling get_FloatHandling() const
Return from the current method.
MethodDescriptor TODO: more info.