LinearScanAllocatorPass.cpp
1 /* src/vm/jit/compiler2/LinearScanAllocatorPass.cpp - LinearScanAllocatorPass
2 
3  Copyright (C) 2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
34 #include "vm/statistics.hpp"
35 #include "vm/options.hpp"
36 
37 #include <climits>
38 
39 #include "toolbox/logging.hpp"
40 
41 #define DEBUG_NAME "compiler2/LinearScanAllocator"
42 
43 STAT_REGISTER_GROUP(lsra_stat,"LSRA","Linear Scan Register Allocator")
44 STAT_REGISTER_GROUP_VAR(std::size_t,num_hints_followed,0,"hints followed",
45  "Number of Hints followed (i.e. no move)",lsra_stat)
46 STAT_REGISTER_GROUP_VAR(std::size_t,num_hints_not_followed,0,"hints not followed",
47  "Number of Hints not followed (i.e. move needed)",lsra_stat)
48 STAT_REGISTER_GROUP_VAR_EXTERN(std::size_t,num_remaining_moves,0,"remaining moves","Number of remaining moves",lsra_stat)
49 STAT_REGISTER_GROUP_VAR(std::size_t,num_split_moves,0,"split moves","Number of split moves",lsra_stat)
50 STAT_REGISTER_GROUP_VAR(std::size_t,num_spill_loads,0,"spill loads","Number of spill loads",lsra_stat)
51 STAT_REGISTER_GROUP_VAR(std::size_t,num_spill_stores,0,"spill stores","Number of spill stores",lsra_stat)
52 STAT_REGISTER_GROUP_VAR(std::size_t,num_resolution_moves,0,"resolution moves","Number of move instructions for resolution",lsra_stat)
53 STAT_REGISTER_GROUP_VAR(std::size_t,num_allocate_free,0,"allocate free","Number of free registers allocated",lsra_stat)
54 STAT_REGISTER_GROUP_VAR(std::size_t,num_allocate_blocked,0,"allocate blocked","Number of blocked registers allocated (spill)",lsra_stat)
55 STAT_REGISTER_GROUP_VAR(std::size_t,num_fixed_intervals,0,"fixed intervals","Number of fixed (preallocated) intervals",lsra_stat)
56 STAT_REGISTER_GROUP_VAR(std::size_t,num_spill_blocked_fixed,0,"spilled blocked","Number of blocked fixed intervals spilled",lsra_stat)
57 STAT_REGISTER_GROUP_VAR(std::size_t,num_resolution_regs,0,"resolution regs","Number of registers allocated for resolution (cycles/stack-to-stack moves)",lsra_stat)
58 STAT_REGISTER_GROUP_VAR(std::size_t,num_resolution_stacks,0,"resolution stacks","Number of stack slots allocated for resolution (no free registers)",lsra_stat)
59 
60 namespace cacao {
61 namespace jit {
62 namespace compiler2 {
63 
64 namespace option {
65  Option<bool> hints("Compiler2hints","compiler2: enable register hints",true,::cacao::option::xx_root());
66 }
67 
68 bool LinearScanAllocatorPass::StartComparator::operator()(const LivetimeInterval &lhs,
69  const LivetimeInterval &rhs) {
70  if (rhs.front().start < lhs.front().start) return true;
71  if (lhs.front().start < rhs.front().start) return false;
72  // rhs.front().start == lhs.front().start;
73  return lhs.back().end < rhs.back().end;
74 }
75 
76 namespace {
77 /**
78  * @Cpp11 use std::function
79  */
80 struct FreeUntilCompare: public std::binary_function<MachineOperand*,MachineOperand*,bool> {
81  bool operator()(MachineOperand *lhs, MachineOperand *rhs) const {
82  bool r = lhs->aquivalence_less(*rhs);
83  //LOG2("FreeUntilCompare: " << *lhs << " aquivalence_less " << *rhs << "=" << r << nl);
84  return r;
85  }
86 };
87 
88 typedef alloc::map<MachineOperand*,UseDef,FreeUntilCompare>::type FreeUntilMap;
89 
90 /**
91  * @Cpp11 use std::function
92  */
93 struct InitFreeUntilMap: public std::unary_function<MachineOperand*,void> {
94  FreeUntilMap &free_until_pos;
95  UseDef end;
96  /// Constructor
97  InitFreeUntilMap(FreeUntilMap &free_until_pos,
98  UseDef end) : free_until_pos(free_until_pos), end(end) {}
99 
100  void operator()(MachineOperand *MO) {
101  free_until_pos.insert(std::make_pair(MO,end));
102  }
103 };
104 
105 
106 /**
107  * @Cpp11 use std::function
108  */
109 struct SetActive: public std::unary_function<LivetimeInterval&,void> {
110  FreeUntilMap &free_until_pos;
111  UseDef start;
112  /// Constructor
113  SetActive(FreeUntilMap &free_until_pos,
114  UseDef start) : free_until_pos(free_until_pos), start(start) {}
115 
116  void operator()(LivetimeInterval& lti) {
117  MachineOperand *MO = lti.get_operand();
118  //LOG2("SetActive: " << lti << " operand: " << *MO << nl);
119  FreeUntilMap::iterator i = free_until_pos.find(MO);
120  if (i != free_until_pos.end()) {
121  //LOG2("set to zero" << nl);
122  LOG2("SetActive: " << lti << " operand: " << *MO << " to " << start << nl);
123  i->second = start;
124  }
125  }
126 };
127 
128 
129 /**
130  * @Cpp11 use std::function
131  */
132 struct SetIntersection: public std::unary_function<LivetimeInterval&,void> {
133  FreeUntilMap &free_until_pos;
134  LivetimeInterval &current;
135  UseDef pos;
136  UseDef end;
137  /// Constructor
138  SetIntersection(FreeUntilMap &free_until_pos,
139  LivetimeInterval &current, UseDef pos, UseDef end)
140  : free_until_pos(free_until_pos), current(current), pos(pos), end(end) {}
141 
142  void operator()(LivetimeInterval& lti) {
143  MachineOperand *MO = lti.get_operand();
144  FreeUntilMap::iterator i = free_until_pos.find(MO);
145  if (i != free_until_pos.end()) {
146  UseDef inter = next_intersection(lti,current,pos,end);
147  LOG2("SetIntersection: " << lti << " inter1: " << inter << nl);
148  if (inter != end) {
149  //UseDef inter = next_intersection(lti,current,pos,end);
150  if (inter < i->second) {
151  i->second = inter;
152  LOG2("SetIntersection: " << lti << " operand: " << *MO << " to " << i->second << nl);
153  }
154  }
155  }
156  }
157 };
158 
159 
160 /**
161  * @Cpp11 use std::function
162  */
163 struct FreeUntilMaxCompare
164  : public std::binary_function<const FreeUntilMap::value_type&, const FreeUntilMap::value_type&,bool> {
165  bool operator()(const FreeUntilMap::value_type &lhs, const FreeUntilMap::value_type &rhs) const {
166  return lhs.second < rhs.second;
167  }
168 };
169 
170 /**
171  * @Cpp11 use std::function
172  */
173 struct SetUseDefOperand: public std::unary_function<const UseDef&,void> {
174  MachineOperand *op;
175  /// Constructor
176  SetUseDefOperand(MachineOperand* op) : op(op) {}
177  void operator()(const UseDef &usedef) {
178  LOG2(" set def operand " << usedef << " to " << op << nl);
179  usedef.get_operand()->op = op;
180  }
181 };
182 
183 
184 MIIterator insert_move_before(MachineInstruction* move, UseDef usedef) {
185  MIIterator free_until_pos_reg = usedef.get_iterator();
186  assert(!(*free_until_pos_reg)->is_label());
187  return insert_before(free_until_pos_reg, move);
188 
189 }
190 
191 } // end anonymous namespace
192 
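/**
 * Try to allocate a register for current without spilling.
 *
 * A free until position is computed for every register in the operand
 * file: registers occupied by an active interval are marked unavailable,
 * registers occupied by an inactive interval are free until the next
 * intersection with current. A register hint is preferred if it is free
 * for the whole interval; otherwise the register with the highest free
 * until position is chosen. If that register is free only for a prefix
 * of the interval, current is split at the free until position and the
 * remainder is pushed back onto the unhandled set.
 *
 * @return false if no register is available without spilling.
 */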
193 inline bool LinearScanAllocatorPass::try_allocate_free(LivetimeInterval &current, UseDef pos) {
194  LOG2(Magenta << "try_allocate_free: " << current << reset_color << nl);
195  // set free until pos of all physical operands to maximum
196  FreeUntilMap free_until_pos;
197  OperandFile op_file;
198  backend->get_OperandFile(op_file,current.get_operand());
199  // set to end
200  std::for_each(op_file.begin(),op_file.end(),
201  InitFreeUntilMap(free_until_pos, UseDef(UseDef::PseudoDef,MIS->mi_end())));
202 
203  // for each interval in active set free until pos to zero
204  std::for_each(active.begin(), active.end(),
205  SetActive(free_until_pos, UseDef(UseDef::PseudoUse,MIS->mi_begin())));
206 
207  // for each interval in inactive set free until pos to next intersection
208  std::for_each(inactive.begin(), inactive.end(),
209  SetIntersection(free_until_pos, current,pos,UseDef(UseDef::PseudoDef,MIS->mi_end())));
210 
211  LOG2("free_until_pos:" << nl);
212  if (DEBUG_COND_N(2)) {
213  for (FreeUntilMap::iterator i = free_until_pos.begin(), e = free_until_pos.end(); i != e; ++i) {
214  LOG2(" " << (i->first) << ": " << i->second << nl);
215  }
216  }
217 
218  MachineOperand *reg = NULL;
219  UseDef free_until_pos_reg(UseDef::PseudoUse,MIS->mi_begin());
220  // check for hints
221  MachineOperand *hint = current.get_hint();
222  if (option::hints && hint) {
223  if (hint->is_virtual()) {
224  // find the current store for hint
225  LivetimeInterval hint_lti = LA->get(hint);
226  hint = hint_lti.get_operand(pos.get_iterator());
227  }
228  LOG2("hint current pos " << hint << nl);
229  FreeUntilMap::const_iterator i = free_until_pos.find(hint);
230  if (i != free_until_pos.end()) {
231  free_until_pos_reg = i->second;
232  if ((free_until_pos_reg != UseDef(UseDef::PseudoUse,MIS->mi_begin())) &&
233  (current.back().end < free_until_pos_reg)) {
234  LOG2("hint available! " << hint << nl);
235  reg = hint;
236  STATISTICS(++num_hints_followed);
237  }
238  else {
239  STATISTICS(++num_hints_not_followed);
240  }
241  }
242  else {
243  STATISTICS(++num_hints_not_followed);
244  }
245  }
246  // if no hint was followed, fall back to the regular selection
247  if (!reg) {
248  // get operand with highest free until pos
249  FreeUntilMap::value_type x = *std::max_element(free_until_pos.begin(), free_until_pos.end(), FreeUntilMaxCompare());
250  reg = x.first;
251  free_until_pos_reg = x.second;
252  }
253 
254  LOG2("reg: " << reg << " pos: " << free_until_pos_reg << nl);
255 
256  if (free_until_pos_reg == UseDef(UseDef::PseudoUse,MIS->mi_begin())) {
257  // no register available without spilling
258  return false;
259  }
260  current.set_operand(reg);
261  LOG2("assigned operand: " << *reg << nl);
262  if (current.back().end < free_until_pos_reg) {
263  // register available for the whole interval
264  return true;
265  }
266  // register available for the first part of the interval
267  MachineOperand *tmp = new VirtualRegister(current.get_operand()->get_type());
268  if(!(*free_until_pos_reg.get_iterator())->is_label()) {
269  // insert move
270  MachineInstruction *move = backend->create_Move(reg,tmp);
271  STATISTICS(++num_split_moves);
272  MIIterator split_pos = insert_move_before(move,free_until_pos_reg);
273  assert_msg(pos.get_iterator() < split_pos, "pos: " << pos << " free_until_pos_reg "
274  << free_until_pos_reg << " split: " << split_pos);
275  LOG2("splitting interval " << current << " at " << split_pos << " inserted move: " << move << nl);
276  LivetimeInterval lti = current.split_active(split_pos,&pos);
277  unhandled.push(lti);
278  } else {
279  // no move required
280  LOG2("splitting interval " << current << " at " << free_until_pos_reg << " no move required, op " << tmp << nl);
281  LivetimeInterval lti = current.split_phi_active(free_until_pos_reg.get_iterator(), tmp);
282  unhandled.push(lti);
283  }
284  return true;
285 }
286 
287 namespace {
288 /**
289  * @Cpp11 use std::function
290  */
291 struct SetNextUseActive: public std::unary_function<LivetimeInterval&,void> {
292  FreeUntilMap &next_use_pos;
293  UseDef pos;
294  UseDef end;
295  /// Constructor
296  SetNextUseActive(FreeUntilMap &next_use_pos,
297  UseDef pos, UseDef end) : next_use_pos(next_use_pos), pos(pos), end(end) {}
298 
299  void operator()(LivetimeInterval& lti) {
300  MachineOperand *MO = lti.get_operand();
301  //LOG2("SetActive: " << lti << " operand: " << *MO << nl);
302  FreeUntilMap::iterator i = next_use_pos.find(MO);
303  if (i != next_use_pos.end()) {
304  //LOG2("set to zero" << nl);
305  i->second = lti.next_usedef_after(pos,end);
306  LOG2("SetNextUseActive: " << lti << " operand: " << *MO << " to " << i->second << nl);
307  // work around self use issue
308  MachineInstruction *MI = *pos.get_iterator();
309  MachineOperand *origMO = lti.get_init_operand();
310  LOG2("MI: " << MI << " orig: " << origMO << nl);
311 
312  if (MI->find(origMO) != MI->end()) {
313  // the operand is used in the interval start instruction
314  // therefore we cannot use the operand -> set to current pos
315  i->second = pos;
316  }
317 
318  }
319  }
320 };
321 
322 /**
323  * @Cpp11 use std::function
324  */
325 struct SetNextUseInactive: public std::unary_function<LivetimeInterval&,void> {
326  FreeUntilMap &free_until_pos;
327  LivetimeInterval &current;
328  UseDef pos;
329  UseDef end;
330  /// Constructor
331  SetNextUseInactive(FreeUntilMap &free_until_pos,
332  LivetimeInterval &current, UseDef pos, UseDef end)
333  : free_until_pos(free_until_pos), current(current), pos(pos), end(end) {}
334 
335  void operator()(LivetimeInterval& lti) {
336  MachineOperand *MO = lti.get_operand();
337  FreeUntilMap::iterator i = free_until_pos.find(MO);
338  if (i != free_until_pos.end() && next_intersection(lti,current,pos,end) != end) {
339  i->second = lti.next_usedef_after(pos,end);
340  LOG2("SetNextUseInactive: " << lti << " operand: " << *MO << " to " << i->second << nl);
341  }
342  }
343 };
344 
345 inline MachineOperand* get_stackslot(Backend *backend, Type::TypeID type) {
346  return backend->get_JITData()->get_StackSlotManager()->create_ManagedStackSlot(type);
347 }
348 
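/**
 * Spill the interval lti which is live at current_pos.
 *
 * For real positions a store to a newly created stack slot is inserted
 * before current_pos and the interval is split there; for pseudo
 * positions (block boundaries) no move is emitted because the resolution
 * phase handles them. If next_use_pos is a real use, a reload into a new
 * virtual register is inserted before it and the remainder of the
 * interval is pushed back onto the unhandled set.
 */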
349 void split_active_position(LivetimeInterval lti, UseDef current_pos, UseDef next_use_pos, Backend *backend,
350  LinearScanAllocatorPass::UnhandledSetTy &unhandled) {
351  MachineOperand *MO = lti.get_operand();
352 
353  MachineOperand *stackslot = NULL;
354  if (current_pos.is_pseudo()) {
355  // We do not create moves for pseudo positions. This is handled during
356  // resolve.
357  if(lti.front().start == current_pos) {
358  // we are trying to spill a phi output operand interval
359  // just set the operand
360  stackslot = get_stackslot(backend,MO->get_type());
361  lti.set_operand(stackslot);
362  }
363  else {
364  // we are trying to spill at a block boundary (e.g. a phi input operand)
365  // split without a move
366  stackslot = get_stackslot(backend,MO->get_type());
367  lti = lti.split_phi_active(current_pos.get_iterator(),stackslot);
368  // XXX should we add the stack interval?
369  unhandled.push(lti);
370  }
371  }
372  else {
373  // move to stack
374  stackslot = get_stackslot(backend,MO->get_type());
375  MachineInstruction *move_to_stack = backend->create_Move(MO,stackslot);
376  MIIterator split_pos = insert_move_before(move_to_stack,current_pos);
377  STATISTICS(++num_spill_stores);
378  lti = lti.split_active(split_pos,&current_pos);
379  // XXX should we add the stack interval?
380  unhandled.push(lti);
381  }
382  // move from stack
383  if (!next_use_pos.is_pseudo()) {
384  // if the next use position is not a real use/def we can ignore it
385  // the moves, if required, will be inserted by the resolution phase
386  MachineOperand *vreg = new VirtualRegister(MO->get_type());
387  MachineInstruction *move_from_stack = backend->create_Move(stackslot, vreg);
388  MIIterator split_pos = insert_move_before(move_from_stack,next_use_pos);
389  STATISTICS(++num_spill_loads);
390  lti = lti.split_active(split_pos);
391  unhandled.push(lti);
392  }
393 }
394 
395 /**
396  * @Cpp11 use std::function
397  */
398 struct SplitActive: public std::unary_function<LivetimeInterval&,void> {
399  MachineOperand *reg;
400  UseDef pos;
401  UseDef next_use_pos;
402  Backend *backend;
403  LinearScanAllocatorPass::UnhandledSetTy &unhandled;
404  /// Constructor
405  SplitActive(MachineOperand *reg , UseDef pos, UseDef next_use_pos, Backend *backend,
406  LinearScanAllocatorPass::UnhandledSetTy &unhandled)
407  : reg(reg), pos(pos), next_use_pos(next_use_pos), backend(backend), unhandled(unhandled) {}
408 
409  void operator()(LivetimeInterval& lti) {
410  MachineOperand *MO = lti.get_operand();
411  if (reg->aquivalent(*MO)) {
412  split_active_position(lti,pos,next_use_pos,backend, unhandled);
413  }
414  }
415 };
416 
417 /**
418  * @Cpp11 use std::function
419  */
420 struct SplitInactive: public std::unary_function<LivetimeInterval&,void> {
421  MachineOperand *reg;
422  UseDef pos;
423  LinearScanAllocatorPass::UnhandledSetTy &unhandled;
424  /// Constructor
425  SplitInactive(MachineOperand *reg , UseDef pos, LinearScanAllocatorPass::UnhandledSetTy &unhandled)
426  : reg(reg), pos(pos), unhandled(unhandled) {}
427 
428  void operator()(LivetimeInterval& lti) {
429  MachineOperand *MO = lti.get_operand();
430  if (reg->aquivalent(*MO)) {
431  //ABORT_MSG("Spill current not yet implemented","not yet implemented");
432  LivetimeInterval new_lti = lti.split_inactive(pos, new VirtualRegister(reg->get_type()));
433  STATISTICS(++num_split_moves);
434  assert(lti.front().start < new_lti.front().start);
435  unhandled.push(new_lti);
436  }
437  }
438 };
439 
440 inline void split_current(LivetimeInterval &current, Type::TypeID type, UseDef pos, UseDef end,
441  LinearScanAllocatorPass::UnhandledSetTy &unhandled, Backend *backend) {
442  LOG2("Spill current (" << current << ") at " << pos << nl);
443  // create move
444  MachineOperand *stackslot = get_stackslot(backend,type);
445  // only create new register if there is a user
446  if ( pos != end) {
447  MachineOperand *vreg = new VirtualRegister(type);
448  MachineInstruction *move_from_stack = backend->create_Move(stackslot,vreg);
449  // split
450  MIIterator split_pos = insert_move_before(move_from_stack,pos);
451  STATISTICS(++num_spill_loads);
452  LivetimeInterval new_lti = current.split_active(split_pos);
453  unhandled.push(new_lti);
454  }
455  // set operand
456  current.set_operand(stackslot);
457 }
458 
459 #if defined(ENABLE_LOGGING)
460 OStream& print_code(OStream &OS, MIIterator pos, MIIterator min, MIIterator max,
461  MIIterator::difference_type before, MIIterator::difference_type after) {
462  MIIterator i = pos;
463  MIIterator e = pos;
464  before = std::min(before,std::distance(min,pos));
465  after = std::min(after,std::distance(pos,max));
466  std::advance(i,-before);
467  std::advance(e,after+1);
468 
469  OS << "..." << nl;
470  for ( ; i != e ; ++i) {
471  if (i == pos) {
472  OS << BoldYellow;
473  }
474  OS << i << reset_color << nl;
475  }
476  return OS << "..." << nl;
477 }
478 #endif
479 } // end anonymous namespace
480 
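/**
 * Allocate a register for current by spilling.
 *
 * For every register the next use position of the intervals currently
 * occupying it is computed. If even the farthest of these next uses
 * comes before the next use of current (and current does not start with
 * a definition), current itself is spilled. Otherwise the active and
 * inactive intervals blocking that register are split and spilled, and
 * the register is assigned to current.
 */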
481 inline bool LinearScanAllocatorPass::allocate_blocked(LivetimeInterval &current) {
482  LOG2(Blue << "allocate_blocked: " << current << reset_color << nl);
483  // set the next use pos of all physical operands to maximum
484  FreeUntilMap next_use_pos;
485  OperandFile op_file;
486  backend->get_OperandFile(op_file,current.get_operand());
487  // set to end
488  std::for_each(op_file.begin(),op_file.end(),
489  InitFreeUntilMap(next_use_pos, UseDef(UseDef::PseudoDef,MIS->mi_end())));
490 
491  // for each interval in active set its next use pos to the next use after the start of current
492  std::for_each(active.begin(), active.end(),
493  SetNextUseActive(next_use_pos, current.front().start, UseDef(UseDef::PseudoDef,MIS->mi_end())));
494 
495  // for each interval in inactive intersecting with current set next pos to next use
496  std::for_each(inactive.begin(), inactive.end(),
497  SetNextUseInactive(next_use_pos, current, current.front().start, UseDef(UseDef::PseudoDef,MIS->mi_end())));
498 
499  LOG2("next_use_pos:" << nl);
500  for (FreeUntilMap::iterator i = next_use_pos.begin(), e = next_use_pos.end(); i != e; ++i) {
501  LOG2(" " << (i->first) << ": " << i->second << nl);
502  }
503 
504  // get register with highest nextUsePos
505  FreeUntilMap::value_type x = *std::max_element(next_use_pos.begin(), next_use_pos.end(), FreeUntilMaxCompare());
506  MachineOperand *reg = x.first;
507  UseDef next_use_pos_reg = x.second;
508  LOG2("reg: " << *reg << " pos: " << next_use_pos_reg << nl);
509 
510  UseDef next_use_pos_current = current.next_usedef_after(current.front().start,
511  UseDef(UseDef::PseudoDef,MIS->mi_end()));
512  if (next_use_pos_reg < next_use_pos_current && !current.front().start.is_def()) {
513  // all other intervals are used before current
514  // so spill current
515  // if the start of the current interval is a definition, we need to assign a register
516  split_current(current,current.get_operand()->get_type(), next_use_pos_current,
517  UseDef(UseDef::PseudoDef,MIS->mi_end()), unhandled, backend);
518  //ERROR_MSG("Spill current not yet implemented","not yet implemented");
519  return true;
520  }
521  else {
522  LOG2("Spill intervals blocking: " << *reg << nl);
523  // spill intervals that currently block reg
524 
525  // split each interval in active that blocks reg (there can be only one)
526  std::for_each(active.begin(), active.end(),
527  SplitActive(reg, current.front().start, next_use_pos_reg, backend, unhandled));
528 
529  // spill each interval in inactive that blocks reg at the end of its livetime hole
530  std::for_each(inactive.begin(), inactive.end(),
531  SplitInactive(reg, current.front().start, unhandled));
532  current.set_operand(reg);
533  LOG2("assigned operand: " << *reg << nl);
534  return true;
535  }
536  return false;
537 }
538 
539 void LinearScanAllocatorPass::initialize() {
540  while (!unhandled.empty()) {
541  unhandled.pop();
542  }
543  handled.clear();
544  inactive.clear();
545  active.clear();
546 }
547 
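/**
 * Main linear scan loop.
 *
 * Intervals are taken from the unhandled set in order of increasing
 * start position. For each interval the active and inactive sets are
 * updated first, then a register is assigned either from the free
 * registers (try_allocate_free) or by spilling (allocate_blocked).
 * Fixed (preallocated) intervals keep their operand; intervals blocking
 * them are split instead. Afterwards all intervals are moved to handled.
 */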
548 bool LinearScanAllocatorPass::allocate_unhandled() {
549  while(!unhandled.empty()) {
550  LivetimeInterval current = unhandled.top();
551  unhandled.pop();
552  LOG(BoldCyan << "current: " << current << reset_color<< nl);
553  UseDef pos = current.front().start;
554  DEBUG3(print_code(dbg(), pos.get_iterator(),MIS->mi_begin(), MIS->mi_end(), 2,2));
555 
556  // check for intervals in active that are handled or inactive
557  LOG2("check for intervals in active that are handled or inactive" << nl);
558  for (ActiveSetTy::iterator i = active.begin(), e = active.end();
559  i != e ; /* ++i */) {
560  LivetimeInterval act = *i;
561  switch (act.get_State(pos)) {
562  case LivetimeInterval::Handled:
563  LOG2(" LTI " << act << " moved from active to handled" << nl);
564  handled.push_back(act);
565  i = active.erase(i);
566  break;
567  case LivetimeInterval::Inactive:
568  LOG2(" LTI " << act << " moved from active to inactive" << nl);
569  inactive.push_back(act);
570  i = active.erase(i);
571  break;
572  default:
573  ++i;
574  }
575  }
576  // check for intervals in inactive that are handled or active
577  LOG2("check for intervals in inactive that are handled or active" << nl);
578  for (InactiveSetTy::iterator i = inactive.begin(), e = inactive.end();
579  i != e ; /* ++i */) {
580  LivetimeInterval inact = *i;
581  switch (inact.get_State(pos)) {
582  case LivetimeInterval::Handled:
583  LOG2(" LTI " << inact << " moved from inactive to handled" << nl);
584  handled.push_back(inact);
585  i = inactive.erase(i);
586  break;
587  case LivetimeInterval::Active:
588  LOG2(" LTI " << inact << " moved from inactive to active" << nl);
589  active.push_back(inact);
590  i = inactive.erase(i);
591  break;
592  default:
593  ++i;
594  }
595  }
596 
597  LOG2("active: ");
598  DEBUG2(print_container(dbg(),active.begin(),active.end())<<nl);
599  LOG2("inactive: ");
600  DEBUG2(print_container(dbg(),inactive.begin(),inactive.end())<<nl);
601 
602  if (current.get_operand()->is_virtual()) {
603  // Try to find a register
604  if (!try_allocate_free(current,pos)) {
605  if (!allocate_blocked(current)) {
606  ERROR_MSG("Register allocation failed",
607  "could not allocate register for " << current);
608  return false;
609  }
610  else {
611  STATISTICS(++num_allocate_blocked);
612  }
613  }
614  else {
615  STATISTICS(++num_allocate_free);
616  }
617  }
618  else {
619  STATISTICS(++num_fixed_intervals);
620  // fixed interval
621  for (ActiveSetTy::iterator i = active.begin(), e = active.end(); i != e ; ++i ) {
622  LivetimeInterval act = *i;
623  if (current.get_operand()->aquivalent(*act.get_operand())) {
624  MachineInstruction *MI = *pos.get_iterator();
625  if (!(MI->is_move() && MI->get_result().op->aquivalent(*MI->get(0).op))) {
626  LOG2("Spill intervals blocking fixed: " << current.get_operand() << nl);
627  // spill intervals that currently block reg
628  UseDef next_use_pos = act.next_usedef_after(
629  current.front().start,UseDef(UseDef::PseudoDef,MIS->mi_end()));
630  SplitActive split (current.get_operand(), current.front().start,
631  next_use_pos, backend, unhandled);
632  SplitActive::argument_type split_act = act;
633  split(split_act);
634  // spill each interval in inactive for reg at the end of the livetime hole
635  std::for_each(inactive.begin(), inactive.end(),
636  SplitInactive(current.get_operand(), current.front().start,
637  unhandled));
638  }
639  }
640  }
641  }
642  // add current to active
643  active.push_back(current);
644  LOG2("LTI " << current << " moved from unhandled to active" << nl);
645  }
646 
647  // move everything to handled
648  for (ActiveSetTy::iterator i = active.begin(), e = active.end(); i != e ; /* ++i */) {
649  handled.push_back(*i);
650  i = active.erase(i);
651  }
652  for (InactiveSetTy::iterator i = inactive.begin(), e = inactive.end(); i != e ; /* ++i */) {
653  handled.push_back(*i);
654  i = inactive.erase(i);
655  }
656  return true;
657 }
658 
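/**
 * Pass driver: collect the livetime intervals from LivetimeAnalysisPass,
 * run the linear scan allocation, resolve the data flow between basic
 * blocks, and finally write the assigned operands back to all use and
 * def positions.
 */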
659 bool LinearScanAllocatorPass::run(JITData &JD) {
660  LA = get_Pass<LivetimeAnalysisPass>();
661  MIS = get_Pass<MachineInstructionSchedulingPass>();
662  jd = &JD;
663  backend = jd->get_Backend();
664 
665  for (LivetimeAnalysisPass::iterator i = LA->begin(), e = LA->end();
666  i != e ; ++i) {
667  LivetimeInterval &inter = i->second;
668  unhandled.push(inter);
669  }
670  // allocate
671  if (!allocate_unhandled()) return false;
672  // resolve
673  if (!resolve()) return false;
674 
675  // set operands
676  for (HandledSetTy::const_iterator i = handled.begin(), e = handled.end(); i != e ; ++i) {
677  LivetimeInterval lti = *i;
678  LOG("Assigned Interval " << lti << " (init: " << *lti.get_init_operand() << ")" <<nl );
679  std::for_each(lti.use_begin(), lti.use_end(), SetUseDefOperand(lti.get_operand()));
680  std::for_each(lti.def_begin(), lti.def_end(), SetUseDefOperand(lti.get_operand()));
681  }
682  return true;
683 }
684 
685 typedef alloc::map<Type::TypeID, alloc::set<MachineOperand*,MachineOperandComp>::type >::type FreeRegsMap;
686 
687 namespace {
688 /**
689  * @Cpp11 use std::function
690  */
691 template <class BinaryOperation>
692 class CfgEdgeFunctor : public std::unary_function<MachineBasicBlock*,void> {
693 private:
694  BinaryOperation op;
695 public:
696  /// Constructor
697  CfgEdgeFunctor(BinaryOperation op) : op(op) {}
698  void operator()(MachineBasicBlock *predecessor) {
699  std::for_each(predecessor->back()->successor_begin(),
700  predecessor->back()->successor_end(), std::bind1st(op,predecessor));
701  }
702 };
703 /// Wrap class template argument.
704 template <class BinaryOperation>
705 CfgEdgeFunctor<BinaryOperation> CfgEdge(BinaryOperation op) {
706  return CfgEdgeFunctor<BinaryOperation>(op);
707 }
708 
709 class CalcBBtoLTIMap : public std::unary_function<MachineBasicBlock*,void> {
710 private:
711  LivetimeAnalysisPass* LA;
712  BBtoLTI_Map &bb2lti_map;
713 
714  struct inner : public std::unary_function<LivetimeAnalysisPass::LivetimeIntervalMapTy::value_type,void> {
715  MachineBasicBlock *MBB;
716  BBtoLTI_Map &bb2lti_map;
717  /// constructor
718  inner(MachineBasicBlock *MBB, BBtoLTI_Map &bb2lti_map) : MBB(MBB), bb2lti_map(bb2lti_map) {}
719  /// function call operator
720  void operator()(argument_type lti_pair) {
721  LivetimeInterval &lti = lti_pair.second;
722  if (lti.get_State(UseDef(UseDef::PseudoUse,MBB->mi_first())) == LivetimeInterval::Active) {
723  bb2lti_map[MBB].push_back(lti);
724  }
725  else {
726  // we need to add the _initial_ lti to find the correct location later on.
727  LivetimeInterval lti_next = lti;
728  while (lti_next.has_next()) {
729  lti_next = lti_next.get_next();
730  if (lti_next.get_State(UseDef(UseDef::PseudoUse,MBB->mi_first())) == LivetimeInterval::Active) {
731  bb2lti_map[MBB].push_back(lti);
732  }
733  }
734  }
735  }
736  };
737 public:
738  /// constructor
739  CalcBBtoLTIMap(LivetimeAnalysisPass* LA, BBtoLTI_Map &bb2lti_map)
740  : LA(LA), bb2lti_map(bb2lti_map) {}
741  /// function call operator
742  void operator()(MachineBasicBlock *MBB) {
743  std::for_each(LA->begin(),LA->end(),inner(MBB,bb2lti_map));
744  }
745 };
746 
747 #if 0
748 bool operator<(const Move &lhs, const Move &rhs) {
749  if (lhs.from < rhs.from)
750  return true;
751  if (rhs.from < lhs.from)
752  return false;
753  return lhs.to < rhs.to;
754 }
755 #endif
756 
757 class ForEachLiveiterval : public std::unary_function<LivetimeInterval&,void> {
758 private:
759  MachineBasicBlock *predecessor;
760  MachineBasicBlock *successor;
761  LivetimeAnalysisPass *LA;
762  MoveMapTy &move_map;
763 public:
764  /// constructor
765  ForEachLiveiterval(MachineBasicBlock *predecessor, MachineBasicBlock *successor,
766  LivetimeAnalysisPass *LA, MoveMapTy &move_map) :
767  predecessor(predecessor), successor(successor), LA(LA), move_map(move_map) {}
768  /// function call operator
769  void operator()(LivetimeInterval& lti) const {
770  LOG2("live: " << lti << " op: " << *lti.get_init_operand() << nl);
771 
772  MachineOperand *move_from;
773  // if the interval starts at a phi of the successor
774  if (lti.front().start.get_iterator() == successor->mi_first()) {
775  MachinePhiInst *phi = get_phi_from_operand(successor, lti.get_init_operand());
776  assert(phi);
777  LOG2("starts at successor begin: " << *phi << nl);
778  std::size_t index = successor->get_predecessor_index(predecessor);
779  MachineOperand *op = phi->get(index).op;
780  if (op->is_Immediate()) {
781  ABORT_MSG("PHI with immediate operand", "Can not yet handle phis with immediate operands");
782  move_from = NULL;
783  } else {
784  move_from = LA->get(op).get_operand(predecessor->mi_last());
785  }
786 
787  }
788  else {
789  move_from = lti.get_operand(predecessor->mi_last());
790  }
791  MachineOperand *move_to = lti.get_operand(successor->mi_first());
792  LOG3(" from: " << move_from << " to " << move_to << nl);
793  if (!move_from->aquivalent(*move_to)) {
794  move_map.push_back(Move(move_from,move_to));
795  }
796  }
797 };
798 
799 #if defined(ENABLE_LOGGING)
800 OStream& operator<<(OStream &OS, const FreeRegsMap &free_regs) {
801  for (FreeRegsMap::const_iterator i = free_regs.begin(), e = free_regs.end() ; i!=e ; ++i) {
802  Type::TypeID type = i->first;
803  const FreeRegsMap::mapped_type &regs = i->second;
804  OS << "type: " << type << " ";
805  print_ptr_container(OS,regs.begin(), regs.end()) << nl;
806  }
807  return OS;
808 }
809 #endif
810 
811 Type::TypeID types[] = {
812  Type::ByteTypeID,
813  Type::IntTypeID,
814  Type::LongTypeID,
815  Type::ReferenceTypeID,
816  Type::DoubleTypeID,
817  Type::VoidTypeID
818 };
819 
820 inline void free_regs_remove_op(FreeRegsMap &free_regs, MachineOperand *op) {
821  for (Type::TypeID *p = types; *p != Type::VoidTypeID; ++p) {
822  Type::TypeID type = *p;
823  free_regs[type].erase(op);
824  }
825 }
826 
827 inline MachineOperand* get_and_remove_free_regs(FreeRegsMap &free_regs, Type::TypeID type) {
828  FreeRegsMap::mapped_type &map = free_regs[type];
829  if(map.empty()) {
830  // no more registers
831  return NULL;
832  }
833  STATISTICS(++num_resolution_regs);
834  // arbitrarily select the first free register
835  FreeRegsMap::mapped_type::iterator i = map.begin();
836  MachineOperand *op = *i;
837  map.erase(i);
838  free_regs_remove_op(free_regs,op);
839  return op;
840 }
841 
842 
843 /**
844  * @todo merge with movemap creation
845  */
846 inline void calc_free_regs(FreeRegsMap &free_regs, BBtoLTI_Map &bb2lti_map, MachineBasicBlock* predecessor,
847  MachineBasicBlock *successor, Backend *backend, LivetimeAnalysisPass *LA) {
848  // get all potential registers
849  for (Type::TypeID *p = types; *p != Type::VoidTypeID; ++p) {
850  Type::TypeID type = *p;
851  OperandFile OF;
852  VirtualRegister vreg(type);
853  backend->get_OperandFile(OF,&vreg);
854  free_regs[type].insert(OF.begin(),OF.end());
855  }
856  // remove used operands
857  BBtoLTI_Map::mapped_type &ltis = bb2lti_map[successor];
858  LOG2(free_regs << nl);
859  for (BBtoLTI_Map::mapped_type::iterator i = ltis.begin(), e = ltis.end(); i != e; ++i) {
860  LivetimeInterval lti = *i;
861  MachineOperand *op_pred;
862  // if the interval starts at a phi of the successor
863  if (lti.front().start.get_iterator() == successor->mi_first()) {
864  MachinePhiInst *phi = get_phi_from_operand(successor, lti.get_init_operand());
865  assert(phi);
866  std::size_t index = successor->get_predecessor_index(predecessor);
867  MachineOperand *op = phi->get(index).op;
868  op_pred = LA->get(op).get_operand(predecessor->mi_last());
869  } else {
870  op_pred = lti.get_operand(predecessor->mi_last());
871  }
872  MachineOperand *op_succ = lti.get_operand(successor->mi_first());
873  assert(op_succ);
874  assert(op_pred);
875  // delete from _all_ types
876  LOG2("calc_free_regs: " << op_pred << " -> " << op_succ << nl);
877  free_regs_remove_op(free_regs,op_succ);
878  free_regs_remove_op(free_regs,op_pred);
879  }
880 }
881 
882 #if defined(ENABLE_LOGGING)
883 inline OStream& operator<<(OStream &OS, const Move &move) {
884  return OS << "move from " << *move.from << " to " << *move.to;
885 }
886 #endif
887 
888 inline bool is_stack2stack_move(Move* move) {
889  return (move->from->is_ManagedStackSlot() || move->from->is_StackSlot() ) &&
890  (move->to->is_ManagedStackSlot() || move->to->is_StackSlot());
891 }
892 
893 bool compare_moves(Move *lhs, Move *rhs) {
894  if (!lhs->dep)
895  return true;
896  if (!rhs->dep)
897  return false;
898  return lhs < rhs;
899 }
900 
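/**
 * Schedules the resolution moves of one control flow edge.
 *
 * The moves form a dependency graph: a move that reads a location must
 * be emitted before the move that overwrites it. Moves are emitted in
 * topological order; cycles are broken by redirecting one move to a
 * temporary (a free register if available, otherwise a stack slot) and
 * copying the temporary to the real destination later. Stack-to-stack
 * moves are routed through a scratch register, which is saved and
 * restored around the copy if no register is free.
 */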
901 class MoveScheduler {
902 private:
903  alloc::list<MachineInstruction*>::type &scheduled;
904  MoveMapTy &move_map;
905  alloc::list<Move*>::type &queue;
906  Backend *backend;
907  alloc::list<Move*>::type &stack;
908  FreeRegsMap &free_regs;
909 public:
910  /// constructor
911  MoveScheduler(alloc::list<MachineInstruction*>::type &scheduled, MoveMapTy &move_map,
912  alloc::list<Move*>::type &queue, Backend *backend, alloc::list<Move*>::type &stack,
913  FreeRegsMap &free_regs)
914  : scheduled(scheduled), move_map(move_map), queue(queue), backend(backend), stack(stack),
915  free_regs(free_regs) {}
916  /// call operator
917  void operator()() {
918  Move* node = stack.back();
919  assert(!node->from->aquivalent(*node->to));
920  LOG2("schedule pre: " << *node << nl);
921  // already scheduled?
922  if (node->is_scheduled()) {
923  stack.pop_back();
924  return;
925  }
926  if (is_stack2stack_move(node)){
927  LOG2("Stack to stack move detected!" << nl);
928  MachineOperand *tmp = get_and_remove_free_regs(free_regs,node->to->get_type());
929  if (!tmp) {
930  // No more free registers!
931  Type::TypeID type = node->from->get_type();
932  // get stack slot
933  MachineOperand *tmp_stack = backend->get_JITData()->
934  get_StackSlotManager()->create_ManagedStackSlot(type);
935  // get reg
936  OperandFile OF;
937  backend->get_OperandFile(OF,tmp_stack);
938  MachineOperand *tmp_reg = OF.front();
939  // backup register
940  scheduled.push_back(backend->create_Move(tmp_reg,tmp_stack));
941  // stack to stack move
942  scheduled.push_back(backend->create_Move(node->from,tmp_reg));
943  scheduled.push_back(backend->create_Move(tmp_reg,node->to));
944  // restore register
945  scheduled.push_back(backend->create_Move(tmp_stack,tmp_reg));
946  //
947  LOG2("schedule post: " << *node << nl);
948  stack.pop_back();
949  node->scheduled = true;
950  return;
951  }
952 
953  move_map.push_back(Move(tmp, node->to));
954  queue.push_back(&move_map.back());
955  node->to = tmp;
956  // not really needed, is it?
957  node->dep = NULL;
958  }
959  else if (node->dep && !node->dep->is_scheduled()) {
960  alloc::list<Move*>::type::iterator i = std::find(stack.begin(),stack.end(),node->dep);
961  if (i != stack.end()) {
962  LOG2("cycle detected!" << nl);
963  // try to get a register
964  MachineOperand *tmp = get_and_remove_free_regs(free_regs,node->to->get_type());
965  if (!tmp) {
966  // No more free registers -> use a stack slot
967  assert(!node->from->is_StackSlot());
968  assert(!node->to->is_StackSlot());
969  tmp = get_stackslot(backend,node->to->get_type());
970  STATISTICS(++num_resolution_stacks);
971  }
972  move_map.push_back(Move(tmp, node->to));
973  Move *tmp_move = &move_map.back();
974  tmp_move->dep = *i;
975  queue.push_back(tmp_move);
976  node->to = tmp;
977  node->dep = NULL;
978  } else {
979  stack.push_back(node->dep);
980  operator()();
981  }
982  }
983  LOG2("schedule post: " << *node << nl);
984  stack.pop_back();
985  scheduled.push_back(backend->create_Move(node->from,node->to));
986  node->scheduled = true;
987  }
988 };
989 
990 template <class OutputIterator>
991 struct RefToPointerInserterImpl: public std::unary_function<Move&,void> {
992  OutputIterator i;
993  /// constructor
994  RefToPointerInserterImpl(OutputIterator i) : i(i) {}
995  /// call operator
996  void operator()(Move& move) {
997  *i = &move;
998  }
999 };
1000 /// wrapper
1001 template <class OutputIterator>
1002 inline RefToPointerInserterImpl<OutputIterator> RefToPointerInserter(OutputIterator i) {
1003  return RefToPointerInserterImpl<OutputIterator>(i);
1004 }
1005 
1006 inline LivetimeInterval& find_or_insert(
1007  LivetimeAnalysisPass::LivetimeIntervalMapTy &lti_map, MachineOperand* op) {
1008  LivetimeAnalysisPass::LivetimeIntervalMapTy::iterator i = lti_map.find(op);
1009  if (i == lti_map.end()) {
1010  LivetimeInterval lti(op);
1011  i = lti_map.insert(std::make_pair(op,lti)).first;
1012  }
1013  return i->second;
1014 }
1015 /**
1016  * @Cpp11 use std::function
1017  */
1018 class ProcessOutOperands : public std::unary_function<MachineOperandDesc,void> {
1019 private:
1020  LivetimeAnalysisPass::LivetimeIntervalMapTy &lti_map;
1021  MIIterator i;
1022  MIIterator e;
1023 public:
1024  /// Constructor
1025  ProcessOutOperands(LivetimeAnalysisPass::LivetimeIntervalMapTy &lti_map,
1026  MIIterator i, MIIterator e)
1027  : lti_map(lti_map), i(i), e(e) {}
1028  void operator()(MachineOperandDesc &op) {
1029  if (op.op->is_virtual()) {
1030  find_or_insert(lti_map,op.op).set_from(UseDef(UseDef::Def,i,&op), UseDef(UseDef::PseudoDef,e));
1031  }
1032  }
1033 };
1034 /**
1035  * @Cpp11 use std::function
1036  */
1037 class ProcessInOperands : public std::unary_function<MachineOperandDesc,void> {
1038 private:
1039  LivetimeAnalysisPass::LivetimeIntervalMapTy &lti_map;
1040  MIIterator first;
1041  MIIterator current;
1042 public:
1043  /// Constructor
1044  ProcessInOperands(LivetimeAnalysisPass::LivetimeIntervalMapTy &lti_map,
1045  MIIterator first, MIIterator current) : lti_map(lti_map), first(first),
1046  current(current) {}
1047  void operator()(MachineOperandDesc &op) {
1048  if (op.op->is_virtual()) {
1049  find_or_insert(lti_map,op.op).add_range(UseDef(UseDef::PseudoUse,first),
1050  UseDef(UseDef::Use,current,&op));
1051  }
1052  }
1053 };
1054 
1055 class ForEachCfgEdge : public std::binary_function<MachineBasicBlock*,MachineBasicBlock*,void> {
1056 private:
1057  BBtoLTI_Map &bb2lti_map;
1058  EdgeMoveMapTy &edge_move_map;
1059  LivetimeAnalysisPass *LA;
1060 
1061 public:
1062  /// constructor
1063  ForEachCfgEdge(BBtoLTI_Map &bb2lti_map, EdgeMoveMapTy &edge_move_map, LivetimeAnalysisPass *LA)
1064  : bb2lti_map(bb2lti_map), edge_move_map(edge_move_map), LA(LA) {}
1065  /// function call operator
1066  void operator()(MachineBasicBlock *predecessor, MachineBasicBlock *successor) const {
1067  MoveMapTy &move_map = edge_move_map[Edge(predecessor,successor)];
1068  LOG2(Cyan << "edge " << *predecessor << " -> " << *successor << nl);
1069  BBtoLTI_Map::mapped_type &lti_live_set = bb2lti_map[successor];
1070  // for each live interval live at successor
1071  std::for_each(lti_live_set.begin(), lti_live_set.end(),
1072  ForEachLiveiterval(predecessor,successor, LA, move_map));
1073  }
1074 };
1075 
1076 } // end anonymous namespace
1077 
1078 
1079 //bool LinearScanAllocatorPass::order_and_insert_move(MachineBasicBlock *predecessor, MachineBasicBlock *successor,
1080 // MoveMapTy &move_map) {
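/**
 * Order and insert the resolution moves of one control flow edge.
 *
 * The registers still free on the edge are computed, the moves recorded
 * for the edge are linked into a dependency graph (read_from maps a
 * source operand to the move reading it), and the MoveScheduler emits
 * them in a legal order. The resulting instructions are inserted at the
 * edge iterator between predecessor and successor.
 */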
1081 bool LinearScanAllocatorPass::order_and_insert_move(EdgeMoveMapTy::value_type &entry, BBtoLTI_Map &bb2lti_map) {
1082  MachineBasicBlock *predecessor = entry.first.predecessor;
1083  MachineBasicBlock *successor = entry.first.successor;
1084  MoveMapTy &move_map = entry.second;
1085 
1086  LOG2("order_and_insert_move: " << *predecessor << " -> " << *successor << nl);
1087  // get free registers
1088  FreeRegsMap free_regs;
1089  calc_free_regs(free_regs,bb2lti_map,predecessor,successor,backend, LA);
1090  LOG2(free_regs << nl);
1091 
1092  if (move_map.empty()) return true;
1093  // calculate data dependency graph (DDG)
1094  alloc::map<MachineOperand*,Move*,MachineOperandComp>::type read_from;
1095 
1096  // create graph nodes
1097  for (MoveMapTy::iterator i = move_map.begin(), e = move_map.end(); i != e; ++i) {
1098  LOG2("need move from " << i->from << " to " << i->to << nl);
1099  read_from[i->from] = &*i;
1100  }
1101  // create graph edges
1102  for (MoveMapTy::iterator i = move_map.begin(), e = move_map.end(); i != e; ++i) {
1103  i->dep = read_from[i->to];
1104  }
1105  // nodes already scheduled
1106  alloc::list<MachineInstruction*>::type scheduled;
1107  alloc::list<Move*>::type queue;
1108  alloc::list<Move*>::type stack;
1109  std::for_each(move_map.begin(), move_map.end(), RefToPointerInserter(std::back_inserter(queue)));
1110  assert(move_map.size() == queue.size());
1111 
1112  MoveScheduler scheduler(scheduled, move_map, queue, backend, stack, free_regs);
1113 
1114  while (!queue.empty()) {
1115  // sort and pop element
1116  queue.sort(compare_moves);
1117  stack.push_back(queue.front());
1118  queue.pop_front();
1119  // schedule
1120  scheduler();
1121  assert(stack.empty());
1122  }
1123 
1124  MIIterator last = get_edge_iterator(predecessor, successor,backend);
1125  MIIterator first = last;
1126  // insert first element
1127  insert_before(last,scheduled.front());
1128  --first;
1129  for (alloc::list<MachineInstruction*>::type::const_iterator i = ++scheduled.begin(),
1130  e = scheduled.end(); i != e ; ++i) {
1131  MachineInstruction *MI = *i;
1132  insert_before(last, MI);
1133  #if defined(ENABLE_STATISTICS)
1134  if (MI->front().op->is_ManagedStackSlot() && MI->back().op->is_Register()) {
1135  STATISTICS(++num_spill_loads);
1136  }
1137  else if (MI->front().op->is_Register() && MI->back().op->is_ManagedStackSlot()) {
1138  STATISTICS(++num_spill_stores);
1139  }
1140  else {
1141  STATISTICS(++num_resolution_moves);
1142  }
1143  #endif
1144  }
1145  return true;
1146 }
1147 
1148 
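/**
 * Re-run the allocation for the instruction range [first,last).
 *
 * Handled intervals that are live in the range are moved back to the
 * active/inactive sets, new intervals are built for the virtual operands
 * defined and used within the range, and allocate_unhandled() assigns
 * locations to them.
 */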
1149 bool LinearScanAllocatorPass::reg_alloc_resolve_block(MIIterator first, MIIterator last) {
1150  assert(active.empty());
1151  assert(inactive.empty());
1152  assert(unhandled.empty());
1153  std::size_t handled_size = handled.size();
1154  /// calculate state sets
1155  for (HandledSetTy::iterator i = handled.begin(), e = handled.end();
1156  i != e ; /* ++i */ ) {
1157  LivetimeInterval lti = *i;
1158  switch (lti.get_State(first)) {
1159  case LivetimeInterval::Active:
1160  active.push_back(lti);
1161  i = handled.erase(i);
1162  break;
1163  case LivetimeInterval::Inactive:
1164  inactive.push_back(lti);
1165  i = handled.erase(i);
1166  break;
1167  default:
1168  ++i;
1169  break;
1170  }
1171  }
1172  /// calculate intervals
1173  LivetimeAnalysisPass::LivetimeIntervalMapTy lti_map;
1174  MIIterator current = last;
1175  while (first != current) {
1176  --current;
1177  // for each output operand of MI
1178  ProcessOutOperands(lti_map, current, last)((*current)->get_result());
1179  // for each input operand of MI
1180  std::for_each((*current)->begin(),(*current)->end(),
1181  ProcessInOperands(lti_map, current, last));
1182  }
1183  for(LivetimeAnalysisPass::LivetimeIntervalMapTy::iterator i = lti_map.begin(),
1184  e = lti_map.end(); i != e; ++i) {
1185  unhandled.push(i->second);
1186  }
1187  handled_size += unhandled.size();
1188  allocate_unhandled();
1189  assert_msg(handled.size() == handled_size, "handled.size(): " << handled.size() << " != handled_size: "
1190  << handled_size);
1191  return true;
1192 }
1193 
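/**
 * Resolution and SSA deconstruction.
 *
 * For every control flow edge the intervals live at the entry of the
 * successor are examined; whenever an interval (or a phi input) resides
 * in a different location at the end of the predecessor than at the
 * start of the successor, a move is recorded for that edge. The moves
 * are then ordered and inserted by order_and_insert_move() and all phi
 * instructions are removed.
 */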
1194 bool LinearScanAllocatorPass::resolve() {
1195  LOG(BoldGreen << "resolve: " << nl);
1196  // calculate basicblock to live interval map
1197  BBtoLTI_Map bb2lti_map;
1198  std::for_each(MIS->begin(), MIS->end(), CalcBBtoLTIMap(LA,bb2lti_map));
1199 
1200  if (DEBUG_COND_N(2))
1201  for (BBtoLTI_Map::const_iterator i = bb2lti_map.begin(), e = bb2lti_map.end(); i != e ; ++i) {
1202  LOG2("live at: " << *i->first << nl);
1203  for (BBtoLTI_Map::mapped_type::const_iterator ii = i->second.begin(), ee = i->second.end(); ii != ee ; ++ii) {
1204  LOG2(" " << *ii << nl);
1205  }
1206  }
1207 
1208  EdgeMoveMapTy edge_move_map;
1209  // for each cfg edge from predecessor to successor (implemented in ForEachCfgEdge)
1210  std::for_each(MIS->begin(), MIS->end(), CfgEdge(ForEachCfgEdge(bb2lti_map,edge_move_map,LA)));
1211  // order and insert move
1212  for (EdgeMoveMapTy::iterator i = edge_move_map.begin(), e = edge_move_map.end(); i != e ; ++i) {
1213  if (!order_and_insert_move(*i,bb2lti_map)) {
1214  return false;
1215  }
1216  }
1217  // remove all phis
1218  std::for_each(MIS->begin(), MIS->end(), std::mem_fun(&MachineBasicBlock::phi_clear));
1219 
1220  return true;
1221 }
1222 
1223 #define LSRA_VERIFY
1224 namespace {
1225 
1226 #if defined(LSRA_VERIFY)
1227 inline bool is_virtual(const MachineOperandDesc &op) {
1228  return op.op->is_virtual();
1229 }
1230 #endif
1231 
1232 } // end anonymous namespace
1233 bool LinearScanAllocatorPass::verify() const {
1234 #if defined(LSRA_VERIFY)
1235  for(MIIterator i = MIS->mi_begin(), e = MIS->mi_end(); i != e ; ++i) {
1236  MachineInstruction *MI = *i;
1237  // result virtual
1238  if (is_virtual(MI->get_result()) || cacao::any_of(MI->begin(), MI->end(), is_virtual)) {
1239  ERROR_MSG("Unallocated Operand!","Instruction " << *MI << " contains unallocated operands.");
1240  return false;
1241  }
1242 
1243  }
1244 #endif
1245  return true;
1246 }
1247 
1248 
1249 // pass usage
1250 PassUsage& LinearScanAllocatorPass::get_PassUsage(PassUsage &PU) const {
1251  // requires
1254  // modified
1256  // destroys
1257  // not yet updated correctly (might be only changed)
1259  return PU;
1260 }
1261 
1262 // the address of this variable is used to identify the pass
1263 char LinearScanAllocatorPass::ID = 0;
1264 
1265 // register pass
1266 static PassRegistry<LinearScanAllocatorPass> X("LinearScanAllocatorPass");
1267 
1268 } // end namespace compiler2
1269 } // end namespace jit
1270 } // end namespace cacao
1271 
1272 
1273 /*
1274  * These are local overrides for various environment variables in Emacs.
1275  * Please do not remove this and leave it at the end of the file, where
1276  * Emacs will automagically detect them.
1277  * ---------------------------------------------------------------------
1278  * Local variables:
1279  * mode: c++
1280  * indent-tabs-mode: t
1281  * c-basic-offset: 4
1282  * tab-width: 4
1283  * End:
1284  * vim:noexpandtab:sw=4:ts=4:
1285  */