diff --git a/src/analysis/fpc.h b/src/analysis/fpc.h index a7ce260391ebafe190157b33d628af381a7aab81..148cc31deb58d8f03e4c11f8aff3dc7ebeb207ab 100644 --- a/src/analysis/fpc.h +++ b/src/analysis/fpc.h @@ -43,8 +43,8 @@ typename BBAnalyzer<T>::AllMap computeFixedPoint(CFG* cfg, const BBAnalyzer<T> & std::vector<CFGBlock*> q; - states.insert(make_pair(cfg->blocks[0], Map())); - q.push_back(cfg->blocks[0]); + states.insert(make_pair(cfg->getStartingBlock(), Map())); + q.push_back(cfg->getStartingBlock()); while (q.size()) { CFGBlock *block = q.back(); diff --git a/src/analysis/function_analysis.cpp b/src/analysis/function_analysis.cpp index 08fbd0d9320199aa30c396d95e20cb0f23e879f1..910ddde06df99429793c76d8630b6075cc1f9b15 100644 --- a/src/analysis/function_analysis.cpp +++ b/src/analysis/function_analysis.cpp @@ -120,9 +120,10 @@ class DefinednessBBAnalyzer : public BBAnalyzer<DefinednessAnalysis::DefinitionL private: typedef DefinednessAnalysis::DefinitionLevel DefinitionLevel; + CFG* cfg; AST_arguments* arguments; public: - DefinednessBBAnalyzer(AST_arguments* arguments) : arguments(arguments) { + DefinednessBBAnalyzer(CFG* cfg, AST_arguments* arguments) : cfg(cfg), arguments(arguments) { } virtual DefinitionLevel merge(DefinitionLevel from, DefinitionLevel into) const { @@ -225,7 +226,7 @@ void DefinednessBBAnalyzer::processBB(Map &starting, CFGBlock *block) const { for (int i = 0; i < block->body.size(); i++) { block->body[i]->accept(&visitor); } - if (block->idx == 0 && arguments) { + if (block == cfg->getStartingBlock() && arguments) { arguments->accept(&visitor); } @@ -238,7 +239,7 @@ void DefinednessBBAnalyzer::processBB(Map &starting, CFGBlock *block) const { } DefinednessAnalysis::DefinednessAnalysis(AST_arguments *args, CFG* cfg, ScopeInfo *scope_info) : scope_info(scope_info) { - results = computeFixedPoint(cfg, DefinednessBBAnalyzer(args), false); + results = computeFixedPoint(cfg, DefinednessBBAnalyzer(cfg, args), false); for (auto p : results) { 
RequiredSet required; @@ -267,9 +268,7 @@ const DefinednessAnalysis::RequiredSet& DefinednessAnalysis::getDefinedNamesAt(C PhiAnalysis::PhiAnalysis(AST_arguments* args, CFG* cfg, LivenessAnalysis *liveness, ScopeInfo *scope_info) : definedness(args, cfg, scope_info), liveness(liveness) { - for (int i = 0; i < cfg->blocks.size(); i++) { - CFGBlock *block = cfg->blocks[i]; - + for (CFGBlock *block : cfg->blocks) { RequiredSet required; if (block->predecessors.size() < 2) continue; diff --git a/src/analysis/type_analysis.cpp b/src/analysis/type_analysis.cpp index 13ff690c932d99e4db08b8813b6ec24d47ed8b20..938597fa923cfc35e02d4c3f0eb5d5f2faf745f0 100644 --- a/src/analysis/type_analysis.cpp +++ b/src/analysis/type_analysis.cpp @@ -73,7 +73,7 @@ static BoxedClass* simpleCallSpeculation(AST_Call* node, CompilerType* rtn_type, } typedef std::unordered_map<std::string, CompilerType*> TypeMap; -typedef std::unordered_map<int, TypeMap> AllTypeMap; +typedef std::unordered_map<CFGBlock*, TypeMap> AllTypeMap; typedef std::unordered_map<AST_expr*, CompilerType*> ExprTypeMap; typedef std::unordered_map<AST_expr*, BoxedClass*> TypeSpeculations; class BasicBlockTypePropagator : public ExprVisitor, public StmtVisitor { @@ -479,7 +479,7 @@ class PropagatingTypeAnalysis : public TypeAnalysis { return getTypeAtBlockStart(name, block->successors[0]); } virtual ConcreteCompilerType* getTypeAtBlockStart(const std::string &name, CFGBlock* block) { - CompilerType *base = starting_types[block->idx][name]; + CompilerType *base = starting_types[block][name]; ASSERT(base != NULL, "%s %d", name.c_str(), block->idx); ConcreteCompilerType *rtn = base->getConcreteType(); @@ -538,7 +538,7 @@ class PropagatingTypeAnalysis : public TypeAnalysis { assert(arg_names.size() == arg_types.size()); { - TypeMap &initial_types = starting_types[0]; + TypeMap &initial_types = starting_types[cfg->getStartingBlock()]; for (int i = 0; i < arg_names.size(); i++) { AST_expr* arg = arg_names[i]; assert(arg->type == 
AST_TYPE::Name); @@ -547,36 +547,34 @@ class PropagatingTypeAnalysis : public TypeAnalysis { } } - std::unordered_set<int> in_queue; - std::deque<int> queue; - queue.push_back(0); + std::unordered_set<CFGBlock*> in_queue; + std::deque<CFGBlock*> queue; + queue.push_back(cfg->getStartingBlock()); while (queue.size()) { - int block_id = queue.front(); + CFGBlock *block = queue.front(); queue.pop_front(); - in_queue.erase(block_id); - - CFGBlock *block = cfg->blocks[block_id]; + in_queue.erase(block); TypeMap ending; if (VERBOSITY("types")) { - printf("processing types for block %d\n", block_id); + printf("processing types for block %d\n", block->idx); } if (VERBOSITY("types") >= 2) { printf("before:\n"); - TypeMap &starting = starting_types[block_id]; + TypeMap &starting = starting_types[block]; for (auto p : starting) { ASSERT(p.second, "%s", p.first.c_str()); printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str()); } } - BasicBlockTypePropagator::propagate(block, starting_types[block_id], ending, expr_types, type_speculations, speculation, scope_info); + BasicBlockTypePropagator::propagate(block, starting_types[block], ending, expr_types, type_speculations, speculation, scope_info); if (VERBOSITY("types") >= 2) { printf("before (after):\n"); - TypeMap &starting = starting_types[block_id]; + TypeMap &starting = starting_types[block]; for (auto p : starting) { ASSERT(p.second, "%s", p.first.c_str()); printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str()); @@ -589,21 +587,20 @@ class PropagatingTypeAnalysis : public TypeAnalysis { } for (int i = 0; i < block->successors.size(); i++) { - int next_id = block->successors[i]->idx; - bool first = (starting_types.count(next_id) == 0); - bool changed = merge(ending, starting_types[next_id]); - if ((first || changed) && in_queue.insert(next_id).second) { - queue.push_back(next_id); + CFGBlock *next_block = block->successors[i]; + bool first = (starting_types.count(next_block) == 0); + bool changed = 
merge(ending, starting_types[next_block]); + if ((first || changed) && in_queue.insert(next_block).second) { + queue.push_back(next_block); } } } if (VERBOSITY("types") >= 2) { - for (int i = 0; i < cfg->blocks.size(); i++) { - printf("Types at beginning of block %d:\n", i); - CFGBlock *b = cfg->blocks[i]; + for (CFGBlock *b : cfg->blocks) { + printf("Types at beginning of block %d:\n", b->idx); - TypeMap &starting = starting_types[i]; + TypeMap &starting = starting_types[b]; for (auto p : starting) { ASSERT(p.second, "%s", p.first.c_str()); printf("%s: %s\n", p.first.c_str(), p.second->debugName().c_str()); diff --git a/src/codegen/irgen.cpp b/src/codegen/irgen.cpp index f7e4a998ae6d24b6aba8602d9f458417fc042de6..2c7714306e62bc313c7df89052beef031f38fb1b 100644 --- a/src/codegen/irgen.cpp +++ b/src/codegen/irgen.cpp @@ -280,20 +280,19 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua llvm::MDNode* func_info = irstate->getFuncDbgInfo(); if (entry_descriptor != NULL) - assert(full_blocks.count(source->cfg->blocks[0]) == 0); + assert(full_blocks.count(source->cfg->getStartingBlock()) == 0); // We need the entry blocks pre-allocated so that we can jump forward to them. 
- std::vector<llvm::BasicBlock*> llvm_entry_blocks; - for (int i = 0; i < source->cfg->blocks.size(); i++) { - CFGBlock *block = source->cfg->blocks[i]; + std::unordered_map<CFGBlock*, llvm::BasicBlock*> llvm_entry_blocks; + for (CFGBlock* block : source->cfg->blocks) { if (partial_blocks.count(block) == 0 && full_blocks.count(block) == 0) { - llvm_entry_blocks.push_back(NULL); + llvm_entry_blocks[block] = NULL; continue; } char buf[40]; - snprintf(buf, 40, "%s_block%d", bb_type, i); - llvm_entry_blocks.push_back(llvm::BasicBlock::Create(g.context, buf, irstate->getLLVMFunction())); + snprintf(buf, 40, "%s_block%d", bb_type, block->idx); + llvm_entry_blocks[block] = llvm::BasicBlock::Create(g.context, buf, irstate->getLLVMFunction()); } llvm::BasicBlock *osr_entry_block = NULL; // the function entry block, where we add the type guards @@ -416,7 +415,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua } else { entry_emitter->getBuilder()->CreateBr(osr_unbox_block); } - unbox_emitter->getBuilder()->CreateBr(llvm_entry_blocks[entry_descriptor->backedge->target->idx]); + unbox_emitter->getBuilder()->CreateBr(llvm_entry_blocks[entry_descriptor->backedge->target]); for (auto p : *initial_syms) { delete p.second; @@ -428,21 +427,21 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua // so that we can construct phi nodes later. // Originally I preallocated these blocks as well, but we can construct the phi's // after the fact, so we can just record the exit blocks as we go along. 
- std::unordered_map<int, llvm::BasicBlock*> llvm_exit_blocks; + std::unordered_map<CFGBlock*, llvm::BasicBlock*> llvm_exit_blocks; //// // Main ir generation: go through each basic block in the CFG and emit the code - std::unordered_map<int, SymbolTable*> ending_symbol_tables; - std::unordered_map<int, ConcreteSymbolTable*> phi_ending_symbol_tables; + std::unordered_map<CFGBlock*, SymbolTable*> ending_symbol_tables; + std::unordered_map<CFGBlock*, ConcreteSymbolTable*> phi_ending_symbol_tables; typedef std::unordered_map<std::string, std::pair<ConcreteCompilerType*, llvm::PHINode*> > PHITable; - std::unordered_map<int, PHITable*> created_phis; + std::unordered_map<CFGBlock*, PHITable*> created_phis; CFGBlock* initial_block = NULL; if (entry_descriptor) { initial_block = entry_descriptor->backedge->target; - } else if (full_blocks.count(source->cfg->blocks[0])) { - initial_block = source->cfg->blocks[0]; + } else if (full_blocks.count(source->cfg->getStartingBlock())) { + initial_block = source->cfg->getStartingBlock(); } // The rest of this code assumes that for each non-entry block that gets evaluated, @@ -458,11 +457,6 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua for (int _i = 0; _i < traversal_order.size(); _i++) { CFGBlock *block = traversal_order[_i].first; CFGBlock *pred = traversal_order[_i].second; - //for (int _i = 0; _i < source->cfg->blocks.size(); _i++) { - //CFGBlock *block = source->cfg->blocks[_i]; - //CFGBlock *pred = NULL; - //if (block->predecessors.size()) - //CFGBlock *pred = block->predecessors[0]; if (VERBOSITY("irgen") >= 1) printf("processing %s block %d\n", bb_type, block->idx); @@ -472,27 +466,27 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua is_partial = true; } else if (!full_blocks.count(block)) { if (VERBOSITY("irgen") >= 1) printf("Skipping this block\n"); - //created_phis[block->idx] = NULL; - //ending_symbol_tables[block->idx] = NULL; - 
//phi_ending_symbol_tables[block->idx] = NULL; - //llvm_exit_blocks[block->idx] = NULL; + //created_phis[block] = NULL; + //ending_symbol_tables[block] = NULL; + //phi_ending_symbol_tables[block] = NULL; + //llvm_exit_blocks[block] = NULL; continue; } std::unique_ptr<IRGenerator> generator(createIRGenerator(irstate, llvm_entry_blocks, block, types, out_guards, in_guards, is_partial)); std::unique_ptr<IREmitter> emitter(createIREmitter(irstate)); - emitter->getBuilder()->SetInsertPoint(llvm_entry_blocks[block->idx]); + emitter->getBuilder()->SetInsertPoint(llvm_entry_blocks[block]); PHITable* phis = NULL; if (!is_partial) { phis = new PHITable(); - created_phis[block->idx] = phis; + created_phis[block] = phis; } // Set initial symbol table: if (is_partial) { // pass - } else if (block->idx == 0) { + } else if (block == source->cfg->getStartingBlock()) { assert(entry_descriptor == NULL); // number of times a function needs to be called to be reoptimized: static const int REOPT_THRESHOLDS[] = { @@ -504,7 +498,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua assert(strcmp("opt", bb_type) == 0); if (ENABLE_REOPT && effort < EffortLevel::MAXIMAL && source->ast != NULL && source->ast->type != AST_TYPE::Module) { - llvm::BasicBlock* preentry_bb = llvm::BasicBlock::Create(g.context, "pre_entry", irstate->getLLVMFunction(), llvm_entry_blocks[0]); + llvm::BasicBlock* preentry_bb = llvm::BasicBlock::Create(g.context, "pre_entry", irstate->getLLVMFunction(), llvm_entry_blocks[source->cfg->getStartingBlock()]); llvm::BasicBlock* reopt_bb = llvm::BasicBlock::Create(g.context, "reopt", irstate->getLLVMFunction()); emitter->getBuilder()->SetInsertPoint(preentry_bb); @@ -517,7 +511,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua llvm::Value* md_vals[] = {llvm::MDString::get(g.context, "branch_weights"), getConstantInt(1), getConstantInt(1000)}; llvm::MDNode* branch_weights = llvm::MDNode::get(g.context, 
llvm::ArrayRef<llvm::Value*>(md_vals)); - llvm::BranchInst* guard = emitter->getBuilder()->CreateCondBr(reopt_test, reopt_bb, llvm_entry_blocks[0], branch_weights); + llvm::BranchInst* guard = emitter->getBuilder()->CreateCondBr(reopt_test, reopt_bb, llvm_entry_blocks[source->cfg->getStartingBlock()], branch_weights); emitter->getBuilder()->SetInsertPoint(reopt_bb); //emitter->getBuilder()->CreateCall(g.funcs.my_assert, getConstantInt(0, g.i1)); @@ -542,7 +536,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua emitter->getBuilder()->CreateRet(postcall); } - emitter->getBuilder()->SetInsertPoint(llvm_entry_blocks[0]); + emitter->getBuilder()->SetInsertPoint(llvm_entry_blocks[source->cfg->getStartingBlock()]); } generator->unpackArguments(arg_names, cf->sig->arg_types); } else if (entry_descriptor && block == entry_descriptor->backedge->target) { @@ -570,7 +564,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua assert(block->predecessors.size()); for (int i = 0; i < block->predecessors.size(); i++) { CFGBlock *b2 = block->predecessors[i]; - assert(ending_symbol_tables.count(b2->idx) == 0); + assert(ending_symbol_tables.count(b2) == 0); into_hax.insert(b2); } @@ -604,19 +598,19 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua if (block->predecessors.size() == 1) { // If this block has only one predecessor, it by definition doesn't need any phi nodes. 
// Assert that the phi_st is empty, and just create the symbol table from the non-phi st: - ASSERT(phi_ending_symbol_tables[pred->idx]->size() == 0, "%d %d", block->idx, pred->idx); - assert(ending_symbol_tables.count(pred->idx)); - generator->copySymbolsFrom(ending_symbol_tables[pred->idx]); + ASSERT(phi_ending_symbol_tables[pred]->size() == 0, "%d %d", block->idx, pred->idx); + assert(ending_symbol_tables.count(pred)); + generator->copySymbolsFrom(ending_symbol_tables[pred]); } else { // With multiple predecessors, the symbol tables at the end of each predecessor should be *exactly* the same. // (this should be satisfied by the post-run() code in this function) // With multiple predecessors, we have to combine the non-phi and phi symbol tables. // Start off with the non-phi ones: - generator->copySymbolsFrom(ending_symbol_tables[pred->idx]); + generator->copySymbolsFrom(ending_symbol_tables[pred]); // And go through and add phi nodes: - ConcreteSymbolTable *pred_st = phi_ending_symbol_tables[pred->idx]; + ConcreteSymbolTable *pred_st = phi_ending_symbol_tables[pred]; for (ConcreteSymbolTable::iterator it = pred_st->begin(); it != pred_st->end(); it++) { //printf("adding phi for %s\n", it->first.c_str()); llvm::PHINode *phi = emitter->getBuilder()->CreatePHI(it->second->getType()->llvmType(), block->predecessors.size(), it->first); @@ -632,9 +626,9 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua generator->run(block); const IRGenerator::EndingState &ending_st = generator->getEndingSymbolTable(); - ending_symbol_tables[block->idx] = ending_st.symbol_table; - phi_ending_symbol_tables[block->idx] = ending_st.phi_symbol_table; - llvm_exit_blocks[block->idx] = ending_st.ending_block; + ending_symbol_tables[block] = ending_st.symbol_table; + phi_ending_symbol_tables[block] = ending_st.phi_symbol_table; + llvm_exit_blocks[block] = ending_st.ending_block; if (into_hax.count(block)) ASSERT(ending_st.symbol_table->size() == 0, "%d", 
block->idx); @@ -646,14 +640,12 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua // the relevant IR, so after we have done all of it, go back through and populate the phi nodes. // Also, do some checking to make sure that the phi analysis stuff worked out, and that all blocks // agreed on what symbols + types they should be propagating for the phis. - for (int i = 0; i < source->cfg->blocks.size(); i++) { - PHITable *phis = created_phis[i]; + for (CFGBlock *b : source->cfg->blocks) { + PHITable *phis = created_phis[b]; if (phis == NULL) continue; - bool this_is_osr_entry = (entry_descriptor && i == entry_descriptor->backedge->target->idx); - - CFGBlock *b = source->cfg->blocks[i]; + bool this_is_osr_entry = (entry_descriptor && b == entry_descriptor->backedge->target); const std::vector<GuardList::BlockEntryGuard*> &block_guards = in_guards.getGuardsForBlock(b); //printf("Found %ld guards for block %p, for %p\n", block_guards.size(), b, &in_guards); @@ -663,9 +655,9 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua if (full_blocks.count(b2) == 0 && partial_blocks.count(b2) == 0) continue; - //printf("%d %d %ld %ld\n", i, b2->idx, phi_ending_symbol_tables[b2->idx]->size(), phis->size()); - compareKeyset(phi_ending_symbol_tables[b2->idx], phis); - assert(phi_ending_symbol_tables[b2->idx]->size() == phis->size()); + //printf("%d %d %ld %ld\n", i, b2->idx, phi_ending_symbol_tables[b2]->size(), phis->size()); + compareKeyset(phi_ending_symbol_tables[b2], phis); + assert(phi_ending_symbol_tables[b2]->size() == phis->size()); } if (this_is_osr_entry) { @@ -693,14 +685,14 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua if (full_blocks.count(b2) == 0 && partial_blocks.count(b2) == 0) continue; - ConcreteCompilerVariable *v = (*phi_ending_symbol_tables[b2->idx])[it->first]; + ConcreteCompilerVariable *v = (*phi_ending_symbol_tables[b2])[it->first]; assert(v); 
assert(v->isGrabbed()); // Make sure they all prepared for the same type: ASSERT(it->second.first == v->getType(), "%d %d: %s %s %s", b->idx, b2->idx, it->first.c_str(), it->second.first->debugName().c_str(), v->getType()->debugName().c_str()); - llvm_phi->addIncoming(v->getValue(), llvm_exit_blocks[b->predecessors[j]->idx]); + llvm_phi->addIncoming(v->getValue(), llvm_exit_blocks[b->predecessors[j]]); } if (this_is_osr_entry) { @@ -732,24 +724,24 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList &out_gua } for (int i = 0; i < block_guards.size(); i++) { - emitters[i]->getBuilder()->CreateBr(llvm_entry_blocks[b->idx]); + emitters[i]->getBuilder()->CreateBr(llvm_entry_blocks[b]); delete emitters[i]; } } - for (int i = 0; i < source->cfg->blocks.size(); i++) { - if (ending_symbol_tables[i] == NULL) + for (CFGBlock *b : source->cfg->blocks) { + if (ending_symbol_tables[b] == NULL) continue; - for (SymbolTable::iterator it = ending_symbol_tables[i]->begin(); it != ending_symbol_tables[i]->end(); it++) { + for (SymbolTable::iterator it = ending_symbol_tables[b]->begin(); it != ending_symbol_tables[b]->end(); it++) { it->second->decvrefNodrop(); } - for (ConcreteSymbolTable::iterator it = phi_ending_symbol_tables[i]->begin(); it != phi_ending_symbol_tables[i]->end(); it++) { + for (ConcreteSymbolTable::iterator it = phi_ending_symbol_tables[b]->begin(); it != phi_ending_symbol_tables[b]->end(); it++) { it->second->decvrefNodrop(); } - delete phi_ending_symbol_tables[i]; - delete ending_symbol_tables[i]; - delete created_phis[i]; + delete phi_ending_symbol_tables[b]; + delete ending_symbol_tables[b]; + delete created_phis[b]; } if (entry_descriptor) { @@ -907,8 +899,8 @@ CompiledFunction* compileFunction(SourceInfo *source, const OSREntryDescriptor * BlockSet full_blocks, partial_blocks; if (entry_descriptor == NULL) { - for (int i = 0; i < source->cfg->blocks.size(); i++) { - full_blocks.insert(source->cfg->blocks[i]); + for (CFGBlock *b : 
source->cfg->blocks) { + full_blocks.insert(b); } } else { full_blocks.insert(entry_descriptor->backedge->target); diff --git a/src/codegen/irgen/irgenerator.cpp b/src/codegen/irgen/irgenerator.cpp index 3fdf8af87ca1b7723924d4d5abc7f09df7fabf0b..426081bb05c83e7cb017d92ecf91ad9812a060ed 100644 --- a/src/codegen/irgen/irgenerator.cpp +++ b/src/codegen/irgen/irgenerator.cpp @@ -163,7 +163,7 @@ class IRGeneratorImpl : public IRGenerator { IREmitterImpl emitter; SymbolTable symbol_table; - std::vector<llvm::BasicBlock*> &entry_blocks; + std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks; llvm::BasicBlock *curblock; CFGBlock *myblock; TypeAnalysis *types; @@ -178,8 +178,8 @@ class IRGeneratorImpl : public IRGenerator { } state; public: - IRGeneratorImpl(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) : irstate(irstate), emitter(irstate), entry_blocks(entry_blocks), myblock(myblock), types(types), out_guards(out_guards), in_guards(in_guards), state(is_partial ? PARTIAL : RUNNING) { - llvm::BasicBlock* entry_block = entry_blocks[myblock->idx]; + IRGeneratorImpl(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) : irstate(irstate), emitter(irstate), entry_blocks(entry_blocks), myblock(myblock), types(types), out_guards(out_guards), in_guards(in_guards), state(is_partial ? 
PARTIAL : RUNNING) { + llvm::BasicBlock* entry_block = entry_blocks[myblock]; emitter.getBuilder()->SetInsertPoint(entry_block); curblock = entry_block; } @@ -1286,8 +1286,8 @@ class IRGeneratorImpl : public IRGenerator { val->decvref(emitter); llvm::Value *llvm_nonzero = nonzero->getValue(); - llvm::BasicBlock *iftrue = entry_blocks[node->iftrue->idx]; - llvm::BasicBlock *iffalse = entry_blocks[node->iffalse->idx]; + llvm::BasicBlock *iftrue = entry_blocks[node->iftrue]; + llvm::BasicBlock *iffalse = entry_blocks[node->iffalse]; nonzero->decvref(emitter); @@ -1461,7 +1461,7 @@ class IRGeneratorImpl : public IRGenerator { endBlock(FINISHED); - llvm::BasicBlock *target = entry_blocks[node->target->idx]; + llvm::BasicBlock *target = entry_blocks[node->target]; if (ENABLE_OSR && node->target->idx < myblock->idx && irstate->getEffortLevel() < EffortLevel::MAXIMAL) { assert(node->target->predecessors.size() > 1); @@ -1714,7 +1714,7 @@ class IRGeneratorImpl : public IRGenerator { }; -IRGenerator *createIRGenerator(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) { +IRGenerator *createIRGenerator(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial) { return new IRGeneratorImpl(irstate, entry_blocks, myblock, types, out_guards, in_guards, is_partial); } diff --git a/src/codegen/irgen/irgenerator.h b/src/codegen/irgen/irgenerator.h index 1151f9a66fce466d12cb9854fedca447d91c0d30..34b27fda1f5fc25876693d148d0a77ae4727da5e 100644 --- a/src/codegen/irgen/irgenerator.h +++ b/src/codegen/irgen/irgenerator.h @@ -198,7 +198,7 @@ class IRGenerator { }; IREmitter *createIREmitter(IRGenState *irstate); -IRGenerator *createIRGenerator(IRGenState *irstate, std::vector<llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, 
TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial); +IRGenerator *createIRGenerator(IRGenState *irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*> &entry_blocks, CFGBlock *myblock, TypeAnalysis *types, GuardList &out_guards, const GuardList &in_guards, bool is_partial); } diff --git a/src/codegen/parser.cpp b/src/codegen/parser.cpp index 8d044ca7448126ff425ad561bfae48e0fffd5bb9..bf9d030058ad6b0a0574f154be9db8c82888c091 100644 --- a/src/codegen/parser.cpp +++ b/src/codegen/parser.cpp @@ -63,7 +63,7 @@ class BufferedReader { uint8_t readByte() { ensure(1); - assert(end > start); + assert(end > start && "premature eof"); if (VERBOSITY("parsing") >= 2) printf("readByte, now %d %d\n", start+1, end); return buf[start++]; diff --git a/src/core/cfg.cpp b/src/core/cfg.cpp index d03b9a3b786cfca3b227145feacd47d21b857ba3..b30936661cd68ed05b6791d1f7c34de50928ef2b 100644 --- a/src/core/cfg.cpp +++ b/src/core/cfg.cpp @@ -29,10 +29,20 @@ void CFGBlock::connectTo(CFGBlock *successor, bool allow_backedge) { assert(this->idx >= 0); ASSERT(successor->idx == -1 || successor->idx > this->idx, "edge from %d to %d", this->idx, successor->idx); } + //assert(successors.count(successor) == 0); + //assert(successor->predecessors.count(this) == 0); + successors.push_back(successor); successor->predecessors.push_back(this); } +void CFGBlock::unconnectFrom(CFGBlock *successor) { + //assert(successors.count(successor)); + //assert(successor->predecessors.count(this)); + successors.erase(std::remove(successors.begin(), successors.end(), successor), successors.end()); + successor->predecessors.erase(std::remove(successor->predecessors.begin(), successor->predecessors.end(), this), successor->predecessors.end()); +} + class CFGVisitor : public ASTVisitor { private: AST_TYPE::AST_TYPE root_type; @@ -664,7 +674,7 @@ class CFGVisitor : public ASTVisitor { virtual bool visit_functiondef(AST_FunctionDef* node) { push_back(node); return true; } virtual bool 
visit_global(AST_Global* node) { push_back(node); return true; } virtual bool visit_import(AST_Import* node) { push_back(node); return true; } - virtual bool visit_pass(AST_Pass* node) { push_back(node); return true; } + virtual bool visit_pass(AST_Pass* node) { return true; } virtual bool visit_assign(AST_Assign* node) { AST_Assign* remapped = new AST_Assign(); @@ -1184,8 +1194,8 @@ void CFG::print() { printf("%ld blocks\n", blocks.size()); PrintVisitor *pv = new PrintVisitor(4); for (int i = 0; i < blocks.size(); i++) { - printf("Block %d", i); CFGBlock *b = blocks[i]; + printf("Block %d", b->idx); if (b->info) printf(" '%s'", b->info); @@ -1228,9 +1238,7 @@ CFG* computeCFG(AST_TYPE::AST_TYPE root_type, std::vector<AST_stmt*> body) { //// // Check some properties expected by later stages: - // Block 0 is hard-coded to be the entry block, and shouldn't have any - // predecessors: - assert(rtn->blocks[0]->predecessors.size() == 0); + assert(rtn->getStartingBlock()->predecessors.size() == 0); // We need to generate the CFG in a way that doesn't have any critical edges, // since the ir generation requires that. @@ -1268,6 +1276,40 @@ CFG* computeCFG(AST_TYPE::AST_TYPE root_type, std::vector<AST_stmt*> body) { assert(rtn->blocks[i]->predecessors[0]->idx < i); } + // Prune unnecessary blocks from the CFG. + // Not strictly necessary, but makes the output easier to look at, + // and can make the analyses more efficient. + // The extra blocks would get merged by LLVM passes, so I'm not sure + // how much overall improvement there is. 
+    for (int i = 0; i < rtn->blocks.size(); i++) { CFGBlock* b = rtn->blocks[i];
+        while (b->successors.size() == 1) {
+            CFGBlock *b2 = b->successors[0];
+            if (b2->predecessors.size() != 1)
+                break;
+
+            if (VERBOSITY()) {
+                //rtn->print();
+                printf("Joining blocks %d and %d\n", b->idx, b2->idx);
+            }
+
+            assert(b->body[b->body.size()-1]->type == AST_TYPE::Jump);
+
+            b->body.pop_back();
+            b->body.insert(b->body.end(), b2->body.begin(), b2->body.end());
+            b->unconnectFrom(b2);
+
+            while (b2->successors.size()) {
+                CFGBlock *b3 = b2->successors[0];
+                b->connectTo(b3, true); b2->unconnectFrom(b3);
+            }
+
+            rtn->blocks.erase(std::remove(rtn->blocks.begin(), rtn->blocks.end(), b2), rtn->blocks.end());
+            delete b2;
+        }
+    }
+
+    assert(rtn->getStartingBlock()->idx == 0);
+
     /*
     // I keep on going back and forth about whether or not it's ok to reuse AST nodes.
     // On the one hand, it's nice to say that an AST* pointer uniquely identifies a spot
diff --git a/src/core/cfg.h b/src/core/cfg.h
index 9ed8262fd63f7e9ea90b0c392b05e334791defa7..7a086a2a702e48a7d94f3b6f70589bfc1bb7966e 100644
--- a/src/core/cfg.h
+++ b/src/core/cfg.h
@@ -51,6 +51,7 @@ class CFGBlock {
         }
 
         void connectTo(CFGBlock *successor, bool allow_backedge=false);
+        void unconnectFrom(CFGBlock *successor);
 
         void push_back(AST_stmt* node) {
             body.push_back(node);
@@ -60,11 +61,19 @@ class CFG {
 // Control Flow Graph
 class CFG {
     private:
+        int next_idx;
     public:
         std::vector<CFGBlock*> blocks;
 
+        CFG() : next_idx(0) {}
+
+        CFGBlock* getStartingBlock() {
+            return blocks[0];
+        }
+
         CFGBlock* addBlock() {
-            int idx = blocks.size();
+            int idx = next_idx;
+            next_idx++;
 
             CFGBlock* block = new CFGBlock(this, idx);
             blocks.push_back(block);
@@ -78,7 +87,8 @@ class CFG {
 
         void placeBlock(CFGBlock *block) {
             assert(block->idx == -1);
-            block->idx = blocks.size();
+            block->idx = next_idx;
+            next_idx++;
 
             blocks.push_back(block);
         }