#include "md-trap.hpp"

#define DEBUG_NAME "compiler2/x86_64"

using namespace x86_64;
117 "Inst: " << src <<
" -> " << dst <<
" type: " << type);
template <unsigned size, class T>
inline T align_to(T val) {
	T rem = val % size;  // distance past the last size-aligned boundary
	return val + (rem == 0 ? 0 : size - rem);
}
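// Usage sketch (illustrative, not from this file): align_to rounds a value up
// to the next multiple of the template parameter, e.g. for the 16-byte stack
// alignment required by the x86_64 ABI:
//   align_to<16>(24) == 32
//   align_to<16>(32) == 32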
ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Conditional not supported: ",

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);
set_op(I, alu->get_result().op);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64: Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Cast not supported!",
          "From " << from << " to " << to);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);
for (std::size_t i = 0; i < I->op_size(); ++i) {
LOG2("INVOKESTATICInst: is resolved" << nl);

LOG2("INVOKESTATICInst: is not resolved" << nl);
um = iptr->sx.s23.s3.um;

lm = iptr->sx.s23.s3.fmiref->p.method;
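// Unresolved call sites keep the unresolved-method reference (um) so a runtime
// patcher can resolve the target later; resolved call sites take the
// methodinfo (lm) directly from the fmiref.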
template <class I, class Seg>
static void write_data(Seg seg, I data) {
	assert(seg.size() == sizeof(I));
	// copy the value into the segment one byte at a time
	for (int i = 0, e = sizeof(I); i < e; ++i) {
		seg[i] = (u1) 0xff & *(reinterpret_cast<u1*>(&data) + i);
	}
}
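// Illustrative sketch (not from the listing): write_data treats the value as a
// raw byte sequence, so on x86_64 the least-significant byte ends up first.
// Any segment-like type with size() and operator[] would do; ByteView below is
// a hypothetical helper (u1/u8 are CACAO's byte and 64-bit typedefs):
//
//   struct ByteView {
//       u1* p; std::size_t n;
//       std::size_t size() const { return n; }
//       u1& operator[](std::size_t i) const { return p[i]; }
//   };
//   u1 buf[8] = {0};
//   write_data<u8>(ByteView{buf, 8}, 0x1122334455667788ULL);
//   // buf[0] == 0x88, buf[7] == 0x11 (little endian)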
size_t size = sizeof(void*);

write_data<void*>(datafrag, fmiref->p.field->value);

assert(0 && "Not yet implemented");
ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << I->get_type());
DataSegment::IdxTy idx = data.get_begin();

set_op(I, move->get_result().op);
ABORT_MSG("x86_64: AddImm Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64: SubRegImm Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64: MulImm Lowering not supported",
          "Inst: " << I << " type: " << type);
case BaseIndexDisplacement:
	// ...
	set_op(I, lea->get_result().op);

case BaseIndexDisplacement2:
	// ...
	set_op(I, lea->get_result().op);

case BaseIndexMultiplier:
	// ...

case BaseIndexMultiplier2:
	// ...

case BaseIndexMultiplierDisplacement:
	// ...
	set_op(I, lea->get_result().op);

case BaseIndexMultiplierDisplacement2:
	// ...
	set_op(I, lea->get_result().op);
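// Background sketch (general x86_64 addressing, not taken from this file): the
// BaseIndex* rules fold an address computation of the form
//   base + index*scale + displacement
// into a single LEA, e.g. an access to an 8-byte array element becomes
//   lea dst, [base + index*8 + disp]
// instead of a separate shift and two additions.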
ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);

ABORT_MSG("x86_64 Lowering not supported",
          "Inst: " << I << " type: " << type);
ABORT_MSG("Rule not supported",
          "Rule " << ruleId << " is not supported by method lowerComplex!");
void setupSrcDst(MachineOperand *&src_op1, MachineOperand *&src_op2, VirtualRegister *&dst,
		Type::TypeID type, bool copyOperands, bool isCommutable) {
	// ...
	} else if (src_op2->is_Register() && isCommutable) {
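// Sketch of the intent (inferred from the parameter names, not spelled out
// here): x86_64 ALU instructions are two-address, i.e. "add dst, src" computes
// dst = dst + src, so the first source operand normally has to be copied into
// the result register first; if the operation is commutable and the second
// source is already in a register, the operands can simply be swapped, e.g.
//   i = a + b   ==>   mov dst, a   ; copy first operand into the result
//                     add dst, b   ; dst is overwritten in place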
compiler2::RegisterFile*

	return new x86_64::RegisterFile(type);