CACAO
codegen.hpp
Go to the documentation of this file.
1 /* src/vm/jit/sparc64/codegen.hpp - code generation macros and
2  definitions for SPARC64
3 
4  Copyright (C) 1996-2013
5  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
6 
7  This file is part of CACAO.
8 
9  This program is free software; you can redistribute it and/or
10  modify it under the terms of the GNU General Public License as
11  published by the Free Software Foundation; either version 2, or (at
12  your option) any later version.
13 
14  This program is distributed in the hope that it will be useful, but
15  WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  General Public License for more details.
18 
19  You should have received a copy of the GNU General Public License
20  along with this program; if not, write to the Free Software
21  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22  02110-1301, USA.
23 
24 */
25 
26 #ifndef CODEGEN_HPP_
27 #define CODEGEN_HPP_ 1
28 
29 #include "config.h"
30 #include "vm/types.hpp"
31 
32 #include "vm/jit/jit.hpp"
33 
34 #include "md-abi.hpp" /* for INT_NATARG_CNT */
35 
36 #include <cassert>
37 
/* debug defines **************************************************************/
/* PASS13BIT(imm): in debug builds, reduce a value to its low 13 bits and
 * sign-extend from bit 12 (the shift pair <<19 / >>19 on the 32-bit signed
 * type s4 replicates the sign extension the hardware applies to a 13-bit
 * immediate field). Release builds pass the value through unchanged.
 * NOTE(review): left-shifting a negative signed value is formally UB in
 * ISO C; this relies on the compiler's arithmetic-shift behavior. */
#ifndef NDEBUG
# define PASS13BIT(imm) ((((s4)(imm)&0x1fff)<<19)>>19)
#else
# define PASS13BIT(imm) imm
#endif
44 
45 
46 /* from md-abi.c */
48 
49 /* branch defines *************************************************************/
50 
/* BRANCH_NOPS: emit two NOPs to reserve the space of a branch sequence.
 * NOTE(review): presumably rewritten later by the branch-patching code --
 * confirm against the users of this macro. */
#define BRANCH_NOPS \
 do { \
 M_NOP; \
 M_NOP; \
 } while (0)
56 
57 
58 /* patcher defines ************************************************************/
59 
/* Patcher-call / exception-check footprint. The byte sizes are derived
 * from the instruction counts (4 bytes per SPARC instruction) and are
 * parenthesized so they expand safely inside larger expressions at the
 * use site (the previous bare `2 * 4` broke under division, e.g.
 * `x / PATCHER_CALL_SIZE` expanded to `x / 2 * 4`). */
#define PATCHER_CALL_INSTRUCTIONS 2 /* number of instructions */
#define PATCHER_CALL_SIZE (PATCHER_CALL_INSTRUCTIONS * 4) /* size in bytes of a patcher call */

#define EXCEPTION_CHECK_INSTRUCTIONS 3 /* number of instructions */
#define EXCEPTION_CHECK_SIZE (EXCEPTION_CHECK_INSTRUCTIONS * 4) /* byte size of an exception check */
65 
/* PATCHER_NOPS: reserve the space of a patcher call with NOPs; the count
 * matches PATCHER_CALL_INSTRUCTIONS (2). */
#define PATCHER_NOPS \
 do { \
 M_NOP; \
 M_NOP; \
 } while (0)
71 
72 
73 /* additional functions and macros to generate code ***************************/
74 
75 
/* MCODECHECK(icnt) */
/* Ensure there is room for at least icnt more instructions (icnt * 4 bytes)
 * in the machine-code buffer; grow the buffer via codegen_increase otherwise. */
#define MCODECHECK(icnt) \
 do { \
 if ((cd->mcodeptr + (icnt) * 4) > cd->mcodeend) \
 codegen_increase(cd); \
 } while (0)
83 
84 
/* ALIGNCODENOP: pad the code pointer to an 8-byte boundary. Instructions
 * are 4 bytes, so the pointer is either aligned or exactly one NOP short.
 * NOTE(review): bare `if` without do/while(0) -- dangling-else hazard when
 * used inside an unbraced if/else; confirm call sites before changing. */
#define ALIGNCODENOP \
 if ((s4) ((ptrint) cd->mcodeptr & 7)) { \
 M_NOP; \
 }
89 
/* ALIGN_STACK_SLOTS(slots): round a stack slot count up to an even number
 * (modifies `slots` in place). Wrapped in do/while(0) -- consistent with
 * the other statement macros in this file -- so it behaves as a single
 * statement and cannot capture an `else`; the argument is parenthesized
 * against operator-precedence surprises. */
#define ALIGN_STACK_SLOTS(slots) \
 do { \
 if ((slots) & 1) \
 (slots)++; \
 } while (0)
93 
94 
/* Thin wrappers over the shared emitter helpers (emit.cpp). */
#define M_COPY(s,d) emit_copy(jd, iptr, (s), (d)) /* copy stack slot s to d */
#define ICONST(d,c) emit_iconst(cd, (d), (c)) /* int constant c into register d */
#define LCONST(d,c) emit_lconst(cd, (d), (c)) /* long constant c into register d */
98 
99 
100 
101 
102 /********************** instruction formats ***********************************/
103 
/* Selector values for the `imm` argument of the format macros below. */
#define REG 0 /* rs2 holds a register number */
#define IMM 1 /* rs2 holds a signed immediate value */

/* 3-address-operations: M_OP3
 * op ..... opcode
 * op3 ..... operation
 * rs1 ..... register number source 1
 * rs2 ..... register number or constant integer source 2
 * rd ..... register number destination
 * imm ..... switch to use rs2 as constant 13bit integer
 * (REG means: use b as register number)
 * (IMM means: use b as signed immediate value)
 *
 * NOTE(review): the assert runs for both REG and IMM mode; register
 * numbers are 5 bits wide, so presumably only immediates can trip it.
 */
#define M_OP3(op,op3,rd,rs1,rs2,imm) \
 do { \
 assert(check_13bit_imm(rs2)); \
 *((u4 *) cd->mcodeptr) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | (imm?((rs2)&0x1fff):(rs2)) ); \
 cd->mcodeptr += 4; \
 } while (0)


/* Field extractors for an already-encoded M_OP3 instruction word. */
#define M_OP3_GET_RD(x) (((x) >> 25) & 0x1f)
#define M_OP3_GET_RS(x) (((x) >> 14) & 0x1f)
#define M_OP3_GET_IMM(x) ( (x) & 0x1fff)
128 
/* 3-address-operations: M_OP3C
 * rcond ... condition opcode (bits 12..10)
 * rs2 ..... register number or 10bit signed immediate
 *
 * NOTE(review): unlike M_OP3 there is no assert that an immediate fits
 * the 10-bit field -- overflowing values are silently truncated.
 */
#define M_OP3C(op,op3,rcond,rd,rs1,rs2,imm) \
 do { \
 *((u4 *) cd->mcodeptr) = ((((s4) (op)) << 30) | ((rd) << 25) | ((op3) << 19) | ((rs1) << 14) | ((imm)<<13) | \
 ((rcond) << 10) | (imm?((rs2)&0x3ff):(rs2)) ); \
 cd->mcodeptr += 4; \
 } while (0)
140 
141 
/* shift Format 3
 * op ..... opcode
 * op3..... op3 code
 * rs1 .... source reg 1
 * rs2 .... source reg 2 or immediate shift count (5 or 6 bits long depending whether 32 or 64 bit shift)
 * rd ..... dest reg
 * imm .... switch for constant
 * x ...... 0 => 32, 1 => 64 bit shift
 *
 * An immediate 64-bit shift count is masked to 6 bits, everything else
 * (32-bit immediate counts and register numbers) to 5 bits.
 */
#define M_SHFT(op,op3,rs1,rs2,rd,imm,x) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((rs1) << 14) | ((imm) << 13) | \
 ((x) << 12) | (((imm) && (x))?((rs2) & 0x3f):((rs2) & 0x1f)) ); \
 cd->mcodeptr += 4; \
 } while (0)
157 
158 
/* Format 4 (conditional move on condition codes)
 * op ..... opcode
 * op3..... op3 code
 * cond ... condition opcode
 * rs2 .... source 2 or signed 11-bit constant
 * rd ..... dest reg
 * imm .... switch for constant
 * cc{0-2} 32-bit 64-bit or fp condition
 */
#define M_FMT4(op,op3,rd,rs2,cond,cc2,cc1,cc0,imm) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op)) << 30) | ((op3) << 19) | ((rd) << 25) | ((cc2) << 18) | ((cond) << 14) | \
 ((imm) << 13) | ((cc1) << 12) | ((cc0) << 11) | ((rs2) & 0x7ff) ); \
 cd->mcodeptr += 4; \
 } while (0)
174 
175 
/* FR_X: map a float register number to its instruction-field encoding --
 * floats live in the upper half of a double word, so the encoded number is
 * (r * 2) with the low bit set. */
#define FR_X(r) (((r) << 1) | 1)
/* DR_X: transpose and pack a double register number into the 5-bit field;
 * bit 5 of the doubled number wraps around into bit 0 (see SPARC spec). */
#define DR_X(r) ((((r) << 1) | (((r) << 1) >> 5)) & 0x1f)
178 
/* 3-address-floating-point-operation
 * op .... opcode
 * op3,opf .... function-number
 * rd .... dest reg
 * rs1 ... source reg (-1 signals unused; encoded as register 0)
 * rs2 ... source reg
 *
 * Raw variant: register numbers are used as-is (callers do their own
 * FR_X/DR_X transposition where needed).
 */
#define M_FOP3(op,op3,opf,rd,rs1,rs2) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:(rs1)) << 14) | \
 ((opf)<<5) | (rs2) ); \
 cd->mcodeptr += 4; \
 } while (0)
/* float addressing: all operands run through the FR_X transposition */
#define M_FOP3_FX(op,op3,opf,rd,rs1,rs2) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | (FR_X(rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:FR_X(rs1)) << 14) | \
 ((opf)<<5) | FR_X(rs2) ); \
 cd->mcodeptr += 4; \
 } while (0)
/* double addressing: all operands run through the DR_X transposition */
#define M_FOP3_DX(op,op3,opf,rd,rs1,rs2) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | (DR_X(rd)<<25) | ((op3)<<19) | ((((rs1)==-1)?0:DR_X(rs1)) << 14) | \
 ((opf)<<5) | DR_X(rs2) ); \
 cd->mcodeptr += 4; \
 } while (0)

/* 3-address-floating-point-compare-operation
 * op .... opcode
 * op3,opf .... function-number
 * fcc ... floating point condition code (fcc0 - fcc3), goes into the rd field
 * rs1 ... source reg
 * rs2 ... source reg
 *
 *
 */
#define M_FCMP_DX(op,op3,opf,fcc,rs1,rs2) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((fcc)<<25) | ((op3)<<19) | (DR_X(rs1) << 14) | \
 ((opf)<<5) | DR_X(rs2) ); \
 cd->mcodeptr += 4; \
 } while (0)

#define M_FCMP_FX(op,op3,opf,fcc,rs1,rs2) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((fcc)<<25) | ((op3)<<19) | (FR_X(rs1) << 14) | \
 ((opf)<<5) | FR_X(rs2) ); \
 cd->mcodeptr += 4; \
 } while (0)
231 
232 /**** format 2 operations ********/
233 
/* branch on integer reg instruction (Format 2, BPr)
 op ..... opcode
 rcond ...... condition to be tested
 disp16 ... 16-bit relative address to be jumped to (divided by 4);
 split into d16hi (top 2 bits, placed at bits 21..20) and
 d16lo (low 14 bits, at bits 13..0)
 rs1 ..... register to be tested
 p ..... prediction bit
 anul .... annullment bit

 All arguments are parenthesized in the expansion; the previous form left
 `p` bare, so a non-trivial expression argument would have miscompiled.
*/
#define M_BRAREG(op,rcond,rs1,disp16,p,anul) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((anul)<<29) | (0<<28) | ((rcond)<<25) | (3<<22) | \
 ( ((disp16)& 0xC000) << 6 ) | ((p) << 19) | ((rs1) << 14) | ((disp16)&0x3fff) ); \
 cd->mcodeptr += 4; \
 } while (0)
248 
249 
/* branch on integer condition codes (Format 2, BPcc/FBPfcc)
 op,op2 .... opcodes
 cond ...... condition to be tested
 disp19 ... 19-bit relative address to be jumped to (divided by 4)
 ccx ..... 32(0) or 64(2) bit test
 p ..... prediction bit
 anul .... annullment bit

 All arguments are parenthesized in the expansion; the previous form left
 `op2`, `ccx` and `p` bare, so non-trivial expression arguments would have
 miscompiled.
*/
#define M_BRACC(op,op2,cond,disp19,ccx,p,anul) \
 do { \
 *((u4 *) cd->mcodeptr) = ( (((s4)(op))<<30) | ((anul)<<29) | ((cond)<<25) | ((op2)<<22) | ((ccx)<<20) | \
 ((p) << 19 ) | ((disp19) & 0x007ffff) ); \
 cd->mcodeptr += 4; \
 } while (0)
264 
265 
266 /************** end-user instructions (see a SPARC asm manual) ***************/
267 
/* SETHI (Format 2): load imm22 into the upper 22 bits of rd, clearing the
 * low 10 bits. imm22 is masked to 22 bits here. */
#define M_SETHI(imm22, rd) \
 do { \
 *((u4 *) cd->mcodeptr) = ((((s4)(0x00)) << 30) | ((rd) << 25) | ((0x04)<<22) | ((imm22)&0x3FFFFF) ); \
 cd->mcodeptr += 4; \
 } while (0)

/* sethi %g0, 0 is the canonical SPARC NOP encoding */
#define M_NOP M_SETHI(0,0) /* nop */
275 
/* Logical operations (Format 3; *CC variants also set the condition codes). */
#define M_AND(rs1,rs2,rd) M_OP3(0x02,0x01,rd,rs1,rs2,REG) /* 64b c = a & b */
#define M_AND_IMM(rs1,rs2,rd) M_OP3(0x02,0x01,rd,rs1,rs2,IMM)
#define M_ANDCC(rs1,rs2,rd) M_OP3(0x02,0x11,rd,rs1,rs2,REG)
#define M_ANDCC_IMM(rs1,rs2,rd) M_OP3(0x02,0x11,rd,rs1,rs2,IMM)

#define M_OR(rs1,rs2,rd) M_OP3(0x02,0x02,rd,rs1,rs2,REG) /* rd = rs1 | rs2 */
#define M_OR_IMM(rs1,rs2,rd) M_OP3(0x02,0x02,rd,rs1,rs2,IMM)
#define M_XOR(rs1,rs2,rd) M_OP3(0x02,0x03,rd,rs1,rs2,REG) /* rd = rs1 ^ rs2 */
#define M_XOR_IMM(rs1,rs2,rd) M_OP3(0x02,0x03,rd,rs1,rs2,IMM)

/* Register move and clear, expressed via OR with %g0 (REG_ZERO). */
#define M_MOV(rs,rd) M_OR(REG_ZERO, rs, rd) /* rd = rs */
#define M_CLR(rd) M_OR(REG_ZERO,REG_ZERO,rd) /* rd = 0 */



/* Shifts (x-bit selects 32- vs 64-bit operation, see M_SHFT). */
#define M_SLLX(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,REG,1) /* 64b rd = rs << rs2 */
#define M_SLLX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,IMM,1)
#define M_SLL(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,REG,0) /* 32b rd = rs << rs2 */
#define M_SLL_IMM(rs1,rs2,rd) M_SHFT(0x02,0x25,rs1,rs2,rd,IMM,0)
#define M_SRLX(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,REG,1) /* 64b rd = rs >>>rs2 */
#define M_SRLX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,1)
#define M_SRL(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,REG,0) /* 32b rd = rs >>>rs2 */
#define M_SRL_IMM(rs1,rs2,rd) M_SHFT(0x02,0x26,rs1,rs2,rd,IMM,0)
#define M_SRAX(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,REG,1) /* 64b rd = rs >> rs2 */
#define M_SRAX_IMM(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,1)
#define M_SRA(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,REG,0) /* 32b rd = rs >> rs2 */
#define M_SRA_IMM(rs1,rs2,rd) M_SHFT(0x02,0x27,rs1,rs2,rd,IMM,0)

/* sra rs, %g0, rd: a 32-bit arithmetic shift by 0 sign-extends bits 31..0 */
#define M_ISEXT(rs,rd) M_SRA(rs,REG_ZERO,rd) /* sign extend 32 bits*/


/* Integer arithmetic (Format 3). */
#define M_ADD(rs1,rs2,rd) M_OP3(0x02,0x00,rd,rs1,rs2,REG) /* 64b rd = rs1 + rs2 */
#define M_ADD_IMM(rs1,rs2,rd) M_OP3(0x02,0x00,rd,rs1,rs2,IMM)
#define M_SUB(rs1,rs2,rd) M_OP3(0x02,0x04,rd,rs1,rs2,REG) /* 64b rd = rs1 - rs2 */
#define M_SUB_IMM(rs1,rs2,rd) M_OP3(0x02,0x04,rd,rs1,rs2,IMM)
#define M_MULX(rs1,rs2,rd) M_OP3(0x02,0x09,rd,rs1,rs2,REG) /* 64b rd = rs1 * rs2 */
#define M_MULX_IMM(rs1,rs2,rd) M_OP3(0x02,0x09,rd,rs1,rs2,IMM)
#define M_DIVX(rs1,rs2,rd) M_OP3(0x02,0x2d,rd,rs1,rs2,REG) /* 64b rd = rs1 / rs2 */

#define M_SUBcc(rs1,rs2,rd) M_OP3(0x02,0x14,rd,rs1,rs2,REG) /* sets xcc and icc */
#define M_SUBcc_IMM(rs1,rs2,rd) M_OP3(0x02,0x14,rd,rs1,rs2,IMM) /* sets xcc and icc */



/**** compare and conditional ALU operations ***********/

/* Compare = subtract into %g0 (only the condition codes survive). */
#define M_CMP(rs1,rs2) M_SUBcc(rs1,rs2,REG_ZERO)
#define M_CMP_IMM(rs1,rs2) M_SUBcc_IMM(rs1,rs2,REG_ZERO)
324 
325 
/* move integer register on (64-bit) condition -- MOVcc on xcc
 * (cc2,cc1,cc0 = 1,1,0 selects the xcc condition codes in M_FMT4) */

#define M_XCMOVEQ(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,REG) /* a==b ? rd=rs */
#define M_XCMOVNE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,REG) /* a!=b ? rd=rs */
#define M_XCMOVLT(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,REG) /* a<b ? rd=rs */
#define M_XCMOVGE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,REG) /* a>=b ? rd=rs */
#define M_XCMOVLE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,REG) /* a<=b ? rd=rs */
#define M_XCMOVGT(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,REG) /* a>b ? rd=rs */
#define M_XCMOVULE(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,1,1,0,REG) /* a<=b ? rd=rs (u)*/

#define M_XCMOVEQ_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x1,1,1,0,IMM) /* a==b ? rd=rs */
#define M_XCMOVNE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,1,1,0,IMM) /* a!=b ? rd=rs */
#define M_XCMOVLT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x3,1,1,0,IMM) /* a<b ? rd=rs */
#define M_XCMOVGE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xb,1,1,0,IMM) /* a>=b ? rd=rs */
#define M_XCMOVLE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x2,1,1,0,IMM) /* a<=b ? rd=rs */
#define M_XCMOVGT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0xa,1,1,0,IMM) /* a>b ? rd=rs */
#define M_XCMOVULE_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,1,1,0,IMM) /* a<=b ? rd=rs (u)*/

/* move integer register on (fcc0) floating point condition
 * (cc2,cc1,cc0 = 0,0,0 selects fcc0) */

#define M_CMOVFGT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x6,0,0,0,IMM) /* fa>fb ? rd=rs */
#define M_CMOVFLT_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x4,0,0,0,IMM) /* fa<fb ? rd=rs */
#define M_CMOVFEQ_IMM(rs,rd) M_FMT4(0x2,0x2c,rd,rs,0x9,0,0,0,IMM) /* fa==fb ? rd=rs */

/* move integer register on (32-bit) condition -- none defined yet */



/* move integer register on register condition (MOVr, Format 3 op3=0x2f) */

#define M_CMOVREQ(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,REG) /* rs1==0 ? rd=rs2 */
#define M_CMOVRNE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,REG) /* rs1!=0 ? rd=rs2 */
#define M_CMOVRLE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,REG) /* rs1<=0 ? rd=rs2 */
#define M_CMOVRLT(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,REG) /* rs1<0 ? rd=rs2 */
#define M_CMOVRGT(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,REG) /* rs1>0 ? rd=rs2 */
#define M_CMOVRGE(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,REG) /* rs1>=0 ? rd=rs2 */

#define M_CMOVREQ_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x1,rd,rs1,rs2,IMM) /* rs1==0 ? rd=rs2 */
#define M_CMOVRNE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x5,rd,rs1,rs2,IMM) /* rs1!=0 ? rd=rs2 */
#define M_CMOVRLE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x2,rd,rs1,rs2,IMM) /* rs1<=0 ? rd=rs2 */
#define M_CMOVRLT_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x3,rd,rs1,rs2,IMM) /* rs1<0 ? rd=rs2 */
#define M_CMOVRGT_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x6,rd,rs1,rs2,IMM) /* rs1>0 ? rd=rs2 */
#define M_CMOVRGE_IMM(rs1,rs2,rd) M_OP3C(0x2,0x2f,0x7,rd,rs1,rs2,IMM) /* rs1>=0 ? rd=rs2 */
369 
370 
/**** big constant helpers *********/

/* #define FITS_13BIT_IMM(x) ((x >= -4096) && (x <= 4095)) */

/* true iff disp fits a signed 13-bit immediate (defined in codegen.cpp) */
bool fits_13(s4 disp);
/* low part of a displacement for a sethi/add split (defined in codegen.cpp) */
s4 get_lopart_disp(s4 disp);

/* WARNING: shadows the C library abs() and evaluates x twice -- never pass
 * an expression with side effects. */
#define abs(x) ((x) < 0 ? (-(x)) : (x))

/* Split a constant for SETHI: upper bits (the instruction shifts its imm22
 * left by 10) and the low 10 bits. */
#define sethi_part(x) ((x)>>10)
#define setlo_part(x) ((x) & 0x3ff)
382 
/* DO_SETHI_REG(c,rd): materialize the constant c in register rd using at
 * most two instructions (sethi + or for positive values; sethi of the
 * complement + xor for negative values, where the xor immediate restores
 * the low bits and the sign).
 * The argument c is parenthesized everywhere it is used directly -- the
 * previous `~c` / `c > 0` forms miscompiled for compound expression
 * arguments such as `a + b`. */
#define DO_SETHI_REG(c,rd) \
 do { \
 if ((c) > 0) { \
 M_SETHI(sethi_part(c), rd); \
 if (setlo_part(c)) { \
 M_OR_IMM(rd, setlo_part(c), rd); \
 } \
 } \
 else { \
 M_SETHI(sethi_part(~(c)), rd); \
 M_XOR_IMM(rd, PASS13BIT(setlo_part(c) | 0xffffffffffff1c00), rd); \
 } \
 } while (0)
396 
/* DO_SETHI_PART(c,rs,rd): put the high part of displacement c plus base rs
 * into rd (the caller supplies the low part to the subsequent load/store).
 * Negative displacements use sethi of -c followed by a subtract.
 * NOTE(review): rd is written before rs is read, so rd must not alias rs --
 * confirm at call sites.
 * The argument c is parenthesized everywhere it is used directly -- the
 * previous `-c` / `c > 0` forms miscompiled for compound expression
 * arguments. */
#define DO_SETHI_PART(c,rs,rd) \
 do { \
 if ((c) > 0) { \
 M_SETHI(sethi_part(c), rd); \
 M_ADD(rs,rd,rd); \
 } \
 else { \
 M_SETHI(sethi_part(-(c)), rd); \
 M_SUB(rs,rd,rd); \
 assert(sethi_part(c) != 0xf); \
 } \
 } while (0)
409 
410 
411 
412 
/* M_LDA: address load, rd = rs + disp. A single add when disp fits 13
 * bits; otherwise the constant is built in rd (DO_SETHI_REG) and rs added.
 * NOTE(review): in the long form rd is clobbered before rs is read, so rd
 * must not be the same register as rs -- confirm at call sites. */
#define M_LDA(rd,rs,disp) \
 do { \
 if (fits_13(disp)) { \
 M_AADD_IMM(rs,disp,rd); \
 } \
 else { \
 DO_SETHI_REG(disp,rd); \
 M_AADD(rd,rs,rd); \
 } \
 } while (0)
423 
/**** load/store operations ********/

#define M_SLDU(rd,rs,disp) M_OP3(0x03,0x02,rd,rs,disp,IMM) /* 16-bit load, uns*/
#define M_SLDS(rd,rs,disp) M_OP3(0x03,0x0a,rd,rs,disp,IMM) /* 16-bit load, sig*/
#define M_BLDS(rd,rs,disp) M_OP3(0x03,0x09,rd,rs,disp,IMM) /* 8-bit load, sig */


#define M_LDX_INTERN(rd,rs,disp) M_OP3(0x03,0x0b,rd,rs,disp,IMM) /* 64-bit load, sig*/
/* M_LDX: 64-bit load that also handles displacements wider than 13 bits by
 * building the high part in rd first (DO_SETHI_PART) -- the destination
 * doubles as scratch, so no temporary register is needed. */
#define M_LDX(rd,rs,disp) \
 do { \
 if (fits_13(disp)) { \
 M_LDX_INTERN(rd,rs,disp); \
 } \
 else { \
 DO_SETHI_PART(disp,rs,rd); \
 M_LDX_INTERN(rd,rd,PASS13BIT(get_lopart_disp(disp))); \
 } \
 } while (0)

#define M_ILD_INTERN(rd,rs,disp) M_OP3(0x03,0x08,rd,rs,disp,IMM) /* 32-bit load, sig */
/* M_ILD: 32-bit signed load; wide-displacement handling as in M_LDX. */
#define M_ILD(rd,rs,disp) \
 do { \
 if (fits_13(disp)) { \
 M_ILD_INTERN(rd,rs,disp); \
 } \
 else { \
 DO_SETHI_PART(disp,rs,rd); \
 M_ILD_INTERN(rd,rd,PASS13BIT(get_lopart_disp(disp))); \
 } \
 } while (0)
454 
455 
456 
457 #define M_SST(rd,rs,disp) M_OP3(0x03,0x06,rd,rs,disp,IMM) /* 16-bit store */
458 #define M_BST(rd,rs,disp) M_OP3(0x03,0x05,rd,rs,disp,IMM) /* 8-bit store */
459 
460 /* Stores with displacement overflow should only happen with PUTFIELD or on */
461 /* the stack. The PUTFIELD instruction does not use REG_ITMP3 and a */
462 /* reg_of_var call should not use REG_ITMP3!!! */
463 
464 #define M_STX_INTERN(rd,rs,disp) M_OP3(0x03,0x0e,rd,rs,disp,IMM) /* 64-bit store */
465 #define M_STX(rd,rs,disp) \
466  do { \
467  if (fits_13(disp)) { \
468  M_STX_INTERN(rd,rs,disp); \
469  } \
470  else { \
471  DO_SETHI_PART(disp,rs,REG_ITMP3); \
472  M_STX_INTERN(rd,REG_ITMP3,PASS13BIT(get_lopart_disp(disp))); \
473  } \
474  } while (0)
475 
476 
477 #define M_IST_INTERN(rd,rs,disp) M_OP3(0x03,0x04,rd,rs,disp,IMM) /* 32-bit store */
478 #define M_IST(rd,rs,disp) \
479  do { \
480  if (fits_13(disp)) { \
481  M_IST_INTERN(rd,rs,disp); \
482  } \
483  else { \
484  DO_SETHI_PART(disp,rs,REG_ITMP3); \
485  M_IST_INTERN(rd,REG_ITMP3,PASS13BIT(get_lopart_disp(disp))); \
486  } \
487  } while (0)
488 
489 
/**** branch operations ********/
/* XXX prediction and annul bits currently set to defaults, but could be used for optimizations */
/* All displacements are in instructions (byte offset / 4). */

/* branch on integer register (BPr via M_BRAREG) */

#define M_BEQZ(r,disp) M_BRAREG(0x0,0x1,r,disp,1,0) /* br r == 0 */
#define M_BLEZ(r,disp) M_BRAREG(0x0,0x2,r,disp,1,0) /* br r <= 0 */
#define M_BLTZ(r,disp) M_BRAREG(0x0,0x3,r,disp,1,0) /* br r < 0 */
#define M_BNEZ(r,disp) M_BRAREG(0x0,0x5,r,disp,1,0) /* br r != 0 */
#define M_BGTZ(r,disp) M_BRAREG(0x0,0x6,r,disp,1,0) /* br r > 0 */
#define M_BGEZ(r,disp) M_BRAREG(0x0,0x7,r,disp,1,0) /* br r >= 0 */


/* branch on (64-bit) integer condition codes (ccx = 2 selects xcc) */

#define M_XBEQ(disp) M_BRACC(0x00,0x1,0x1,disp,2,1,0) /* branch a==b */
#define M_XBNE(disp) M_BRACC(0x00,0x1,0x9,disp,2,1,0) /* branch a!=b */
#define M_XBGT(disp) M_BRACC(0x00,0x1,0xa,disp,2,1,0) /* branch a>b */
#define M_XBLT(disp) M_BRACC(0x00,0x1,0x3,disp,2,1,0) /* branch a<b */
#define M_XBGE(disp) M_BRACC(0x00,0x1,0xb,disp,2,1,0) /* branch a>=b */
#define M_XBLE(disp) M_BRACC(0x00,0x1,0x2,disp,2,1,0) /* branch a<=b */
#define M_XBUGE(disp) M_BRACC(0x00,0x1,0xd,disp,2,1,0) /* br uns a>=b */
#define M_XBUGT(disp) M_BRACC(0x00,0x1,0xc,disp,2,1,0) /* br uns a>b */
#define M_XBULT(disp) M_BRACC(0x00,0x1,0x5,disp,2,1,0) /* br uns a<b */

/* branch on (32-bit) integer condition codes (ccx = 0 selects icc) */

#define M_BR(disp) M_BRACC(0x00,0x1,0x8,disp,0,1,0) /* branch */
#define M_BEQ(disp) M_BRACC(0x00,0x1,0x1,disp,0,1,0) /* branch a==b */
#define M_BNE(disp) M_BRACC(0x00,0x1,0x9,disp,0,1,0) /* branch a!=b */
#define M_BGT(disp) M_BRACC(0x00,0x1,0xa,disp,0,1,0) /* branch a>b */
#define M_BLT(disp) M_BRACC(0x00,0x1,0x3,disp,0,1,0) /* branch a<b */
#define M_BGE(disp) M_BRACC(0x00,0x1,0xb,disp,0,1,0) /* branch a>=b */
#define M_BLE(disp) M_BRACC(0x00,0x1,0x2,disp,0,1,0) /* branch a<=b */
#define M_BULE(disp) M_BRACC(0x00,0x1,0x4,disp,0,1,0) /* br uns a<=b */
#define M_BUGT(disp) M_BRACC(0x00,0x1,0xc,disp,0,1,0) /* br uns a>b */
#define M_BULT(disp) M_BRACC(0x00,0x1,0x5,disp,0,1,0) /* br uns a<b */

/* branch on (fcc0) floating point condition codes (op2 = 0x5) */

#define M_FBR(disp) M_BRACC(0x00,0x5,0x8,disp,0,1,0) /* branch */
#define M_FBU(disp) M_BRACC(0x00,0x5,0x7,disp,0,1,0) /* unordered */
#define M_FBG(disp) M_BRACC(0x00,0x5,0x6,disp,0,1,0) /* branch a>b */
#define M_FBL(disp) M_BRACC(0x00,0x5,0x4,disp,0,1,0) /* branch a<b */
#define M_FBO(disp) M_BRACC(0x00,0x5,0xf,disp,0,1,0) /* br ordered */
535 
536 
537 
/* Register-window save/restore (Format 3, op3 0x3c / 0x3d). */
#define M_SAVE(rs1,rs2,rd) M_OP3(0x02,0x3c,rd,rs1,rs2,IMM)
#define M_SAVE_REG(rs1,rs2,rd) M_OP3(0x02,0x3c,rd,rs1,rs2,REG)
#define M_RESTORE(rs1,rs2,rd) M_OP3(0x02,0x3d,rd,rs1,rs2,IMM)


/* Indirect jumps (jmpl, op3 0x38): target is rs1+rs2 / rs1+imm. */

#define M_JMP(rd,rs1,rs2) M_OP3(0x02,0x38,rd, rs1,rs2,REG) /* jump to rs1+rs2, adr of instr. saved to rd */
#define M_JMP_IMM(rd,rs1,rs2) M_OP3(0x02,0x38,rd, rs1,rs2,IMM)
#define M_RET(rs1,imm) M_OP3(0x02,0x38,REG_ZERO,rs1,imm,IMM) /* a jump which discards the current pc */

#define M_RETURN(rs1,imm) M_OP3(0x02,0x39,0,rs1,imm,IMM) /* like ret, but does window restore */
549 
/**** floating point operations **/
/* All single-precision operands go through FR_X, doubles through DR_X
 * (register transposition, see above). */

#define M_DMOV(rs,rd) M_FOP3_DX(0x02,0x34,0x02,rd,-1,rs) /* rd = rs */
#define M_FMOV(rs,rd) M_FOP3_FX(0x02,0x34,0x01,rd,-1,rs) /* rd = rs */

/* raw variant: no FR_X transposition applied to the operands */
#define M_FMOV_INTERN(rs,rd) M_FOP3(0x02,0x34,0x01,rd,-1,rs) /* rd = rs */

#define M_FNEG(rs,rd) M_FOP3_FX(0x02,0x34,0x05,rd,-1,rs) /* rd = -rs */
#define M_DNEG(rs,rd) M_FOP3_DX(0x02,0x34,0x06,rd,-1,rs) /* rd = -rs */

#define M_FADD(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x41,rd,rs1,rs2) /* float add */
#define M_DADD(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x42,rd,rs1,rs2) /* double add */
#define M_FSUB(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x045,rd,rs1,rs2) /* float sub */
#define M_DSUB(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x046,rd,rs1,rs2) /* double sub */
#define M_FMUL(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x049,rd,rs1,rs2) /* float mul */
#define M_DMUL(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x04a,rd,rs1,rs2) /* double mul */
#define M_FDIV(rs1,rs2,rd) M_FOP3_FX(0x02,0x34,0x04d,rd,rs1,rs2) /* float div */
#define M_DDIV(rs1,rs2,rd) M_FOP3_DX(0x02,0x34,0x04e,rd,rs1,rs2) /* double div */


/**** compare and conditional FPU operations ***********/

/* rd field 0 ==> fcc target unit is fcc0 */
#define M_FCMP(rs1,rs2) M_FCMP_FX(0x02,0x35,0x051,0,rs1,rs2) /* compare flt */
#define M_DCMP(rs1,rs2) M_FCMP_DX(0x02,0x35,0x052,0,rs1,rs2) /* compare dbl */

/* conversion functions -- mixed-width conversions use the raw M_FOP3 with
 * explicit FR_X/DR_X per operand, since source and dest differ in width */

#define M_CVTIF(rs,rd) M_FOP3_FX(0x02,0x34,0x0c4,rd,-1,rs)/* int2flt */
#define M_CVTID(rs,rd) M_FOP3(0x02,0x34,0x0c8,DR_X(rd),-1,FR_X(rs)) /* int2dbl */
#define M_CVTLF(rs,rd) M_FOP3(0x02,0x34,0x084,FR_X(rd),-1,DR_X(rs)) /* long2flt */
#define M_CVTLD(rs,rd) M_FOP3_DX(0x02,0x34,0x088,rd,-1,rs) /* long2dbl */

#define M_CVTFI(rs,rd) M_FOP3_FX(0x02,0x34,0x0d1,rd,-1,rs) /* flt2int */
#define M_CVTDI(rs,rd) M_FOP3(0x02,0x34,0x0d2,FR_X(rd),-1,DR_X(rs)) /* dbl2int */
#define M_CVTFL(rs,rd) M_FOP3(0x02,0x34,0x081,DR_X(rd),-1,FR_X(rs)) /* flt2long */
#define M_CVTDL(rs,rd) M_FOP3_DX(0x02,0x34,0x082,rd,-1,rs) /* dbl2long */

#define M_CVTFD(rs,rd) M_FOP3(0x02,0x34,0x0c9,DR_X(rd),-1,FR_X(rs)) /* flt2dbl */
#define M_CVTDF(rs,rd) M_FOP3(0x02,0x34,0x0c6,FR_X(rd),-1,DR_X(rs)) /* dbl2float */
591 
592 
593 
594 #define M_DLD_INTERN(rd,rs1,disp) M_OP3(0x03,0x23,DR_X(rd),rs1,disp,IMM) /* double (64-bit) load */
595 #define M_DLD(rd,rs,disp) \
596  do { \
597  s4 lo = (short) (disp); \
598  s4 hi = (short) (((disp) - lo) >> 13); \
599  if (hi == 0) { \
600  M_DLD_INTERN(rd,rs,lo); \
601  } else { \
602  M_SETHI(hi&0x3ffff8,rd); \
603  M_AADD(rs,rd,rd); \
604  M_DLD_INTERN(rd,rd,PASS13BIT(lo)); \
605  } \
606  } while (0)
607 /* Note for SETHI: sethi has a 22bit imm, only set upper 19 bits */
608 
609 #define M_FLD_INTERN(rd,rs1,disp) M_OP3(0x03,0x20,FR_X(rd),rs1,disp,IMM) /* float (32-bit) load */
610 #define M_FLD(rd,rs,disp) \
611  do { \
612  s4 lo = (short) (disp); \
613  s4 hi = (short) (((disp) - lo) >> 13); \
614  if (hi == 0) { \
615  M_FLD_INTERN(rd,rs,lo); \
616  } else { \
617  M_SETHI(hi&0x3ffff8,rd); \
618  M_AADD(rs,rd,rd); \
619  M_FLD_INTERN(rd,rd,PASS13BIT(lo)); \
620  } \
621  } while (0)
622 
623 
#define M_FST_INTERN(rd,rs,disp) M_OP3(0x03,0x24,FR_X(rd),rs,disp,IMM) /* float (32-bit) store */
/* M_FST: float store; wide displacements need REG_ITMP3 as scratch since
 * the stored register must survive (cf. the load macros, which reuse rd). */
#define M_FST(rd,rs,disp) \
 do { \
 s4 lo = (short) (disp); \
 s4 hi = (short) (((disp) - lo) >> 13); \
 if (hi == 0) { \
 M_FST_INTERN(rd,rs,lo); \
 } else { \
 M_SETHI(hi&0x3ffff8,REG_ITMP3); \
 M_AADD(rs,REG_ITMP3,REG_ITMP3); \
 M_FST_INTERN(rd,REG_ITMP3,PASS13BIT(lo)); \
 } \
 } while (0)


#define M_DST_INTERN(rd,rs1,disp) M_OP3(0x03,0x27,DR_X(rd),rs1,disp,IMM) /* double (64-bit) store */
/* M_DST: double store; wide-displacement handling as in M_FST. */
#define M_DST(rd,rs,disp) \
 do { \
 s4 lo = (short) (disp); \
 s4 hi = (short) (((disp) - lo) >> 13); \
 if (hi == 0) { \
 M_DST_INTERN(rd,rs,lo); \
 } else { \
 M_SETHI(hi&0x3ffff8,REG_ITMP3); \
 M_AADD(rs,REG_ITMP3,REG_ITMP3); \
 M_DST_INTERN(rd,REG_ITMP3,PASS13BIT(lo)); \
 } \
 } while (0)
652 
653 
654 
655 /*
656  * Address pseudo instruction
657  */
658 
659 #define POINTERSHIFT 3 /* x8 */
660 
661 
662 #define M_ALD_INTERN(a,b,disp) M_LDX_INTERN(a,b,disp)
663 #define M_ALD(rd,rs,disp) M_LDX(rd,rs,disp)
664 #define M_AST_INTERN(a,b,disp) M_STX_INTERN(a,b,disp)
665 #define M_AST(a,b,disp) M_STX(a,b,disp)
666 #define M_AADD(a,b,c) M_ADD(a,b,c)
667 #define M_AADD_IMM(a,b,c) M_ADD_IMM(a,b,c)
668 #define M_ASUB_IMM(a,b,c) M_SUB_IMM(a,b,c)
669 #define M_ASLL_IMM(a,b,c) M_SLLX_IMM(a,b,c)
670 
671 #define M_ACMP(a,b) M_CMP(a,b)
672 #define M_ICMP(a,b) M_CMP(a,b)
673 
674 #endif // CODEGEN_HPP_
675 
676 
677 /*
678  * These are local overrides for various environment variables in Emacs.
679  * Please do not remove this and leave it at the end of the file, where
680  * Emacs will automagically detect them.
681  * ---------------------------------------------------------------------
682  * Local variables:
683  * mode: c++
684  * indent-tabs-mode: t
685  * c-basic-offset: 4
686  * tab-width: 4
687  * End:
688  * vim:noexpandtab:sw=4:ts=4:
689  */
#define INT_NATARG_CNT
Definition: md-abi.hpp:100
s4 nat_argintregs[INT_NATARG_CNT]
Definition: codegen.hpp:47
s4 get_lopart_disp(disp)
Definition: codegen.cpp:84
int32_t s4
Definition: types.hpp:45
bool fits_13(s4 disp)
Definition: codegen.cpp:74