#ifndef _JIT_COMPILER2_X86_64EMITHELPER
#define _JIT_COMPILER2_X86_64EMITHELPER
u1 get_rex(X86_64Register *reg, X86_64Register *rm = NULL,
           bool opsiz64 = true) {
	const unsigned rex_w = 3;
	const unsigned rex_r = 2;
	const unsigned rex_b = 0;
	// ...
	if (rm && rm->extented) {
		// ...

	const unsigned rex_w = 3;
	const unsigned rex_r = 2;
	const unsigned rex_x = 1;
	const unsigned rex_b = 0;
	// ...
	if (reg2 && reg2->extented) {
		// ...
	if (reg3 && reg3->extented) {
		// ...
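// A minimal standalone sketch (not part of this header) of how the bit
// positions above compose a REX prefix; the 0x40 base pattern and the bit
// layout are from the AMD64 manuals:
#include <cstdint>
static std::uint8_t make_rex(bool w, bool r, bool x, bool b) {
	std::uint8_t rex = 0x40;          // fixed 0100WRXB pattern
	rex |= std::uint8_t(w) << 3;      // REX.W: 64-bit operand size
	rex |= std::uint8_t(r) << 2;      // REX.R: extends ModR/M reg
	rex |= std::uint8_t(x) << 1;      // REX.X: extends SIB index
	rex |= std::uint8_t(b) << 0;      // REX.B: extends ModR/M r/m, SIB base, or opcode reg
	return rex;
}
// e.g. make_rex(true, false, false, true) == 0x49, the prefix of "mov r8, rax".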

bool use_sib(X86_64Register *base, X86_64Register *index) {
	return index || base == &RSP || base == &R12;
}
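// Why these two registers: RSP (0x4) and R12 (0xc) share the low three
// register bits 0b100, and an r/m value of 0b100 is the escape that
// announces a SIB byte, so addressing based on either of them (like any
// indexed form) has to use the SIB encoding.
static_assert((0x4 & 0x7) == 0x4 && (0xc & 0x7) == 0x4,
              "RSP and R12 collide with the SIB escape value");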

	if (disp == 0 || base == 0x05) {
		// ...
	}
	else if (fits_into<s1>(disp)) {
		// ...
	}
	else if (fits_into<s4>(disp)) {
		// ...
	}
	else {
		ABORT_MSG("Illegal displacement", "Displacement: " << disp);
	}

	modrm = mod << modrm_mod;
	modrm |= reg << modrm_reg;
	modrm |= rm << modrm_rm;
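// A worked instance of this packing (a sketch, independent of this header):
// register-direct addressing is mod=0b11, so reg=RAX(0), rm=RCX(1) yields
// the ModR/M byte of "mov rcx, rax" (48 89 c1).
#include <cstdint>
constexpr std::uint8_t modrm_rax_to_rcx = (0x3 << 6) | (0x0 << 3) | 0x1;
static_assert(modrm_rax_to_rcx == 0xc1, "ModR/M byte of mov rcx, rax");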

	sib = scale << sib_scale;
	sib |= index->get_index() << sib_index;

u1 get_modrm_u1(u1 mod, u1 reg, u1 rm) {
	const unsigned modrm_mod = 6;
	const unsigned modrm_reg = 3;
	const unsigned modrm_rm = 0;

	u1 modrm = (0x3 & mod) << modrm_mod;
	modrm |= (0x7 & reg) << modrm_reg;
	modrm |= (0x7 & rm) << modrm_rm;
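// The SIB byte uses the same 2/3/3 split: scale at bit 6, index at bit 3,
// base at bit 0, with the scale field holding log2 of the factor. A sketch:
// [rax + rcx*4] has scale=2, index=RCX(1), base=RAX(0).
#include <cstdint>
constexpr std::uint8_t sib_rax_rcx_4 = (0x2 << 6) | (0x1 << 3) | 0x0;
static_assert(sib_rax_rcx_4 == 0x88, "SIB byte of [rax + rcx*4]");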

	assert(false && "Not a stackslot");

	for (std::size_t i = 0, e = CSB.size(); i < e; ++i) {
	switch (op->get_OperandID()) {
	// ...
	assert(0 && "Not a stackslot");

	for (int i = 0, e = sizeof(T); i < e; ++i) {
		code[i + 1] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
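// The loop emits a multi-byte opcode most-significant byte first, i.e. in
// the order the manuals list it. A standalone sketch for a two-byte escape
// opcode such as 0x0faf ("imul r64, r/m64"):
#include <cstdint>
static void write_opcode_be(std::uint8_t *out, std::uint16_t opcode) {
	for (int i = 0, e = sizeof(opcode); i < e; ++i)
		out[i] = 0xff & (opcode >> (8 * (e - i - 1)));
	// out now holds { 0x0f, 0xaf }
}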

	for (int i = 0, e = sizeof(T); i < e; ++i) {
		code[i + 1] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	// ...
	code[2 + sizeof(T)] = u1(disp);

	for (int i = 0, e = sizeof(T); i < e; ++i) {
		code[i + 1] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	// ...
	code[2 + sizeof(T)] = u1(0xff & (disp >> (0 * 8)));
	code[3 + sizeof(T)] = u1(0xff & (disp >> (1 * 8)));
	code[4 + sizeof(T)] = u1(0xff & (disp >> (2 * 8)));
	code[5 + sizeof(T)] = u1(0xff & (disp >> (3 * 8)));

template <class O, class I>
static void reg2imm(CodeMemory *CM, O opcode, X86_64Register *reg, I imm) {
	// ...
	for (int i = 0, e = sizeof(O); i < e; ++i) {
		code[i + 1] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	for (int i = 0, e = sizeof(I); i < e; ++i) {
		code[i + sizeof(O) + 1] = (u1) 0xff & (imm >> (8 * i));
	}

template <class O, class I>
static void reg2imm_modrm(CodeMemory *CM, O opcode, X86_64Register *reg,
                          X86_64Register *rm, I imm) {
	// ...
	for (int i = 0, e = sizeof(O); i < e; ++i) {
		code[i + 1] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	// ...
	for (int i = 0, e = sizeof(I); i < e; ++i) {
		code[i + sizeof(O) + 2] = (u1) 0xff & (imm >> (8 * i));
	}

template <class O, class I>
static void imm_op(CodeMemory *CM, O opcode, I imm) {
	// ...
	for (int i = 0, e = sizeof(O); i < e; ++i) {
		code[i] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	for (int i = 0, e = sizeof(I); i < e; ++i) {
		code[i + sizeof(O)] = (u1) 0xff & (imm >> (8 * i));
	}

template <class O, class I>
static void imm_op(CodeFragment &code, O opcode, I imm) {
	assert(code.size() == (sizeof(O) + sizeof(I)));

	for (int i = 0, e = sizeof(O); i < e; ++i) {
		code[i] = (u1) 0xff & (opcode >> (8 * (e - i - 1)));
	}
	for (int i = 0, e = sizeof(I); i < e; ++i) {
		code[i + sizeof(O)] = (u1) 0xff & (imm >> (8 * i));
	}

template <class I, class Seg>
static void imm(Seg seg, I imm) {
	assert(seg.size() == sizeof(I));

	for (int i = 0, e = sizeof(I); i < e; ++i) {
		seg[i] = (u1) 0xff & *(reinterpret_cast<u1*>(&imm) + i);
	}
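// Immediates go out least-significant byte first; the reinterpret_cast
// above copies the in-memory representation, which matches the encoding
// because the host is little-endian (always true when the JIT runs on
// x86-64 itself). A sketch of the equivalent explicit-shift form:
#include <cstdint>
static void write_imm_le(std::uint8_t *out, std::uint32_t imm) {
	for (int i = 0; i < 4; ++i)
		out[i] = 0xff & (imm >> (8 * i));   // 0x12345678 -> 78 56 34 12
}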

static void emit(CodeMemory *CM, u1 primary_opcode, GPInstruction::OperandSize op_size,
                 MachineOperand *src, MachineOperand *dst,
                 u1 secondary_opcode = 0, u1 op_reg = 0, u1 prefix = 0,
                 bool prefix_0f = false, bool encode_dst = true,
                 bool imm_sign_extended = false) {
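	// A hypothetical call site (the OS_64 enumerator name is an assumption,
	// not taken from this listing): with primary opcode 0x89, which is
	// "MOV r/m64, r64" once REX.W is applied, a 64-bit register-to-register
	// move would be requested as
	//   emit(CM, 0x89, GPInstruction::OS_64, src, dst);
	// leaving prefix, REX, and ModR/M selection to the dispatch below.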

	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	// ...
	if (!imm_sign_extended) {
		code += (u1) 0xff & (immval >> 0x00);
		// ...
		code += (u1) 0xff & (immval >> 0x00);
		code += (u1) 0xff & (immval >> 0x08);
		// ...
		code += (u1) 0xff & (immval >> 0x00);
		code += (u1) 0xff & (immval >> 0x08);
		code += (u1) 0xff & (immval >> 0x10);
		code += (u1) 0xff & (immval >> 0x18);
		// ...
		code += (u1) 0xff & (immval >> 0x00);
		code += (u1) 0xff & (immval >> 0x08);
		code += (u1) 0xff & (immval >> 0x10);
		code += (u1) 0xff & (immval >> 0x18);
		code += (u1) 0xff & (immval >> 0x20);
		code += (u1) 0xff & (immval >> 0x28);
		code += (u1) 0xff & (immval >> 0x30);
		code += (u1) 0xff & (immval >> 0x38);
500 "dst: " << dst_reg <<
" src: " << src_imm <<
" op_code: " << primary_opcode);
		code += (u1) 0xff & (immval >> 0x00);
	}
	else if (fits_into<s2>(src_imm->get_value<s8>())) {
		// ...
		code += (u1) 0xff & (immval >> 0x00);
		code += (u1) 0xff & (immval >> 0x08);
	}
	else if (fits_into<s4>(src_imm->get_value<s8>())) {
		// ...
		code += (u1) 0xff & (immval >> 0x00);
		code += (u1) 0xff & (immval >> 0x08);
		code += (u1) 0xff & (immval >> 0x10);
		code += (u1) 0xff & (immval >> 0x18);
	}
	// ...
	code += (u1) 0xff & (immval >> 0x00);
	code += (u1) 0xff & (immval >> 0x08);
	code += (u1) 0xff & (immval >> 0x10);
	code += (u1) 0xff & (immval >> 0x18);
	code += (u1) 0xff & (immval >> 0x20);
	code += (u1) 0xff & (immval >> 0x28);
	code += (u1) 0xff & (immval >> 0x30);
	code += (u1) 0xff & (immval >> 0x38);
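	// With imm_sign_extended the narrowest encoding wins: a value such as
	// -1 fits into s1, so a single 0xff byte is emitted and the CPU
	// sign-extends it to the full operand size (the imm8 forms of the 0x83
	// opcode group work this way, versus the full-width imm32 forms of 0x81).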
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	if (fits_into<s1>(index)) {
		// ...
	}
	else {
		code += (u1) 0xff & (index >> 0x00);
		code += (u1) 0xff & (index >> 0x08);
		code += (u1) 0xff & (index >> 0x10);
		code += (u1) 0xff & (index >> 0x18);
	}

	u1 rex = get_rex(dst_reg, base, op_size, index);
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	bool sib = use_sib(base, index);
	code += get_modrm(dst_reg, base, disp, sib);
	if (sib) {
		code += get_sib(base, index, scale);
	}
	// ...
	if (fits_into<s1>(disp) && base != &RBP) {
		// ...
	}
	else {
		code += (u1) 0xff & (disp >> 0x00);
		code += (u1) 0xff & (disp >> 0x08);
		code += (u1) 0xff & (disp >> 0x10);
		code += (u1) 0xff & (disp >> 0x18);
	}
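	// The pieces assembled above line up as follows for "mov rax, [rbx+8]":
	//   REX    0x48  (REX.W, no extended registers)
	//   opcode 0x8b  (MOV r64, r/m64)
	//   ModR/M 0x43  (mod=01 disp8, reg=RAX(0), rm=RBX(3), no SIB needed)
	//   disp8  0x08
	// giving the byte sequence 48 8b 43 08.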
595 "dst_reg: " << dst_reg <<
" src: " << src <<
" op_code: " << primary_opcode);
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	if (fits_into<s1>(index)) {
		// ...
	}
	else {
		code += (u1) 0xff & (index >> 0x00);
		code += (u1) 0xff & (index >> 0x08);
		code += (u1) 0xff & (index >> 0x10);
		code += (u1) 0xff & (index >> 0x18);
	}
627 "dst: " << dst <<
" src: " << src <<
" op_code: " << primary_opcode);

	u1 rex = get_rex(src_reg, base, op_size, index);
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	bool sib = use_sib(base, index);
	code += get_modrm(src_reg, base, disp, sib);
	if (sib) {
		code += get_sib(base, index, scale);
	}
	// ...
	if (fits_into<s1>(disp) && base != &RBP) {
		// ...
	}
	else {
		code += (u1) 0xff & (disp >> 0x00);
		code += (u1) 0xff & (disp >> 0x08);
		code += (u1) 0xff & (disp >> 0x10);
		code += (u1) 0xff & (disp >> 0x18);
	}

	u1 rex = get_rex(NULL, base, op_size, index);
	// ...
	code += primary_opcode;
	if (secondary_opcode != 0x00) {
		code += secondary_opcode;
	}
	bool sib = use_sib(base, index);
	code += get_modrm(op_reg, base->get_index(), disp, sib);
	if (sib) {
		code += get_sib(base, index, scale);
	}
	// ...
	if (fits_into<s1>(disp) && base != &RBP) {
		// ...
	}
	else {
		code += (u1) 0xff & (disp >> 0x00);
		code += (u1) 0xff & (disp >> 0x08);
		code += (u1) 0xff & (disp >> 0x10);
		code += (u1) 0xff & (disp >> 0x18);
	}
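	// With no source register, op_reg occupies the ModR/M reg field as an
	// opcode extension (written "/digit" in the manuals). For example,
	// "addq $1, (%rbx)" encodes as
	//   REX    0x48  (REX.W)
	//   opcode 0x83  (/0 selects ADD)
	//   ModR/M 0x03  (mod=00, reg=/0, rm=RBX(3))
	//   imm8   0x01
	// i.e. the byte sequence 48 83 03 01.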

	code += (u1) 0xff & (immval >> 0x00);
	// ...
	code += (u1) 0xff & (immval >> 0x00);
	code += (u1) 0xff & (immval >> 0x08);
	// ...
	code += (u1) 0xff & (immval >> 0x00);
	code += (u1) 0xff & (immval >> 0x08);
	code += (u1) 0xff & (immval >> 0x10);
	code += (u1) 0xff & (immval >> 0x18);
	ABORT_MSG("Invalid Immediate Size (imm64 with disp not allowed)",
	          "dst: " << dst_mod << " src: " << src_imm << " op_code: " << primary_opcode);
	// ...
	code += (u1) 0xff & (immval >> 0x00);
	code += (u1) 0xff & (immval >> 0x08);
	code += (u1) 0xff & (immval >> 0x10);
	code += (u1) 0xff & (immval >> 0x18);
	code += (u1) 0xff & (immval >> 0x20);
	code += (u1) 0xff & (immval >> 0x28);
	code += (u1) 0xff & (immval >> 0x30);
	code += (u1) 0xff & (immval >> 0x38);
737 "dst: " << dst_mod <<
" src: " << src_imm <<
" op_code: " << primary_opcode);
743 "dst: " << dst_mod <<
" src: " << src <<
" op_code: " << primary_opcode);
748 "dst: " << dst <<
" src: " << src <<
" op_code: " << primary_opcode);

// Referenced declarations:

// InstructionEncoding
static void emit(CodeMemory *CM, u1 primary_opcode, GPInstruction::OperandSize op_size, MachineOperand *src, MachineOperand *dst, u1 secondary_opcode=0, u1 op_reg=0, u1 prefix=0, bool prefix_0f=false, bool encode_dst=true, bool imm_sign_extended=false)
static void reg2reg(CodeMemory *CM, T opcode, X86_64Register *reg, X86_64Register *rm)
static void reg2rbp_disp8(CodeMemory *CM, T opcode, X86_64Register *reg, s1 disp)
static void reg2rbp_disp32(CodeMemory *CM, T opcode, X86_64Register *reg, s4 disp)
static void reg2imm(CodeMemory *CM, O opcode, X86_64Register *reg, I imm)
static void reg2imm_modrm(CodeMemory *CM, O opcode, X86_64Register *reg, X86_64Register *rm, I imm)
static void imm_op(CodeMemory *CM, O opcode, I imm)
static void imm_op(CodeFragment &code, O opcode, I imm)
static void imm(Seg seg, I imm)

// Encoding helpers
u1 get_rex(X86_64Register *reg, X86_64Register *rm=NULL, bool opsiz64=true)
bool use_sib(X86_64Register *base, X86_64Register *index)
u1 get_modrm(u1 reg, u1 base, s4 disp, bool use_sib=false)
u1 get_modrm_u1(u1 mod, u1 reg, u1 rm)
u1 get_modrm_reg2reg(X86_64Register *reg, X86_64Register *rm)
u1 get_modrm_1reg(u1 reg, X86_64Register *rm)
u1 get_sib(X86_64Register *base, X86_64Register *index=NULL, u1 scale=1)
void add_CodeSegmentBuilder(CodeMemory *CM, const CodeSegmentBuilder &CSB)
int get_stack_position(MachineOperand *op)

// REX prefix bits
//   W: 0 = operand size determined by CS.D, 1 = 64-bit operand size
//   R: extension of the ModR/M reg field
//   X: extension of the SIB index field
//   B: extension of the ModR/M r/m field, SIB base field, or opcode reg field
REX & operator+(OpReg reg)
OpReg(X86_64Register *reg)

// CodeSegmentBuilder
alloc::deque< u1 >::type Container
Container::iterator iterator
Container::const_iterator const_iterator
Container::value_type value_type
std::deque< T, Allocator< T > > type
CodeSegmentBuilder & operator+=(u1 d)
u1 & operator[](std::size_t i)
u1 operator[](std::size_t i) const
const_iterator begin() const
const_iterator end() const

// Registers (name, index, extended, offset, size)
GPRegister RSP("RSP", 0x4, false, 0x4 *8, 8)
GPRegister RBP("RBP", 0x5, false, 0x5 *8, 8)
GPRegister R12("R12", 0x4, true, 0xc *8, 8)
unsigned get_index() const
X86_64Register * operator*() const
X86_64Register * operator->() const

// Operands (MachineOperand: operands that can be directly used by the
// machine, i.e. register, memory, stackslot; ManagedStackSlot: a "virtual"
// slot that will eventually be mapped to a machine-level slot)
bool is_stackslot() const
bool is_StackSlot() const
bool is_ManagedStackSlot() const
bool is_Immediate() const
virtual StackSlot * to_StackSlot()
virtual ManagedStackSlot * to_ManagedStackSlot()
StackSlotManager * get_parent() const
u4 get_number_of_machine_slots() const
X86_64Register * getBase()
X86_64Register * getIndex()

// Casts and utilities
X86_64Register * cast_to< X86_64Register >(Register *reg)
X86_64ModRMOperand * cast_to< X86_64ModRMOperand >(MachineOperand *op)
StackSlot * cast_to< StackSlot >(MachineOperand *op)
Immediate * cast_to< Immediate >(MachineOperand *op)
CodeFragment get_CodeFragment(std::size_t size)   // get a code fragment
std::size_t size() const                          // size of the reference
#define ABORT_MSG(EXPR_SHORT, EXPR_LONG)