Commit 2e030372 authored by Kevin Modzelewski

Merge branch 'deopt'

Fully switch to the new deopt system, and clean up a lot of stuff.
parents a8ff0670 1bfb56e8
......@@ -22,6 +22,7 @@
#include "analysis/scoping_analysis.h"
#include "codegen/codegen.h"
#include "codegen/compvars.h"
#include "codegen/osrentry.h"
#include "codegen/type_recording.h"
#include "core/ast.h"
#include "core/cfg.h"
......@@ -622,6 +623,7 @@ public:
return getTypeAtBlockStart(name, block->successors[0]);
}
ConcreteCompilerType* getTypeAtBlockStart(InternedString name, CFGBlock* block) override {
assert(starting_types.count(block));
CompilerType* base = starting_types[block][name];
ASSERT(base != NULL, "%s %d", name.c_str(), block->idx);
......@@ -672,40 +674,20 @@ public:
return changed;
}
static PropagatingTypeAnalysis* doAnalysis(CFG* cfg, const ParamNames& arg_names,
const std::vector<ConcreteCompilerType*>& arg_types,
SpeculationLevel speculation, ScopeInfo* scope_info) {
static PropagatingTypeAnalysis* doAnalysis(CFG* cfg, SpeculationLevel speculation, ScopeInfo* scope_info,
TypeMap&& initial_types, CFGBlock* initial_block) {
Timer _t("PropagatingTypeAnalysis::doAnalysis()");
AllTypeMap starting_types;
ExprTypeMap expr_types;
TypeSpeculations type_speculations;
assert(arg_names.totalParameters() == arg_types.size());
TypeMap& initial_types = starting_types[cfg->getStartingBlock()];
int i = 0;
for (; i < arg_names.args.size(); i++) {
initial_types[scope_info->internString(arg_names.args[i])] = unboxedType(arg_types[i]);
}
if (arg_names.vararg.size()) {
initial_types[scope_info->internString(arg_names.vararg)] = unboxedType(arg_types[i]);
i++;
}
if (arg_names.kwarg.size()) {
initial_types[scope_info->internString(arg_names.kwarg)] = unboxedType(arg_types[i]);
i++;
}
assert(i == arg_types.size());
std::unordered_set<CFGBlock*> in_queue;
std::priority_queue<CFGBlock*, std::vector<CFGBlock*>, CFGBlockMinIndex> queue;
queue.push(cfg->getStartingBlock());
in_queue.insert(cfg->getStartingBlock());
starting_types[initial_block] = std::move(initial_types);
queue.push(initial_block);
in_queue.insert(initial_block);
int num_evaluations = 0;
while (!queue.empty()) {
......@@ -787,6 +769,38 @@ TypeAnalysis* doTypeAnalysis(CFG* cfg, const ParamNames& arg_names, const std::v
// if (effort == EffortLevel::INTERPRETED) {
// return new NullTypeAnalysis();
//}
return PropagatingTypeAnalysis::doAnalysis(cfg, arg_names, arg_types, speculation, scope_info);
assert(arg_names.totalParameters() == arg_types.size());
TypeMap initial_types;
int i = 0;
for (; i < arg_names.args.size(); i++) {
initial_types[scope_info->internString(arg_names.args[i])] = unboxedType(arg_types[i]);
}
if (arg_names.vararg.size()) {
initial_types[scope_info->internString(arg_names.vararg)] = unboxedType(arg_types[i]);
i++;
}
if (arg_names.kwarg.size()) {
initial_types[scope_info->internString(arg_names.kwarg)] = unboxedType(arg_types[i]);
i++;
}
assert(i == arg_types.size());
return PropagatingTypeAnalysis::doAnalysis(cfg, speculation, scope_info, std::move(initial_types),
cfg->getStartingBlock());
}
TypeAnalysis* doTypeAnalysis(CFG* cfg, const OSREntryDescriptor* entry_descriptor, EffortLevel effort,
TypeAnalysis::SpeculationLevel speculation, ScopeInfo* scope_info) {
// if (effort == EffortLevel::INTERPRETED) {
// return new NullTypeAnalysis();
//}
TypeMap initial_types(entry_descriptor->args.begin(), entry_descriptor->args.end());
return PropagatingTypeAnalysis::doAnalysis(cfg, speculation, scope_info, std::move(initial_types),
entry_descriptor->backedge->target);
}
}
......@@ -27,6 +27,7 @@ class ScopeInfo;
class CFGBlock;
class BoxedClass;
class AST_expr;
class OSREntryDescriptor;
class TypeAnalysis {
public:
......@@ -45,6 +46,8 @@ public:
TypeAnalysis* doTypeAnalysis(CFG* cfg, const ParamNames& param_names,
const std::vector<ConcreteCompilerType*>& arg_types, EffortLevel effort,
TypeAnalysis::SpeculationLevel speculation, ScopeInfo* scope_info);
TypeAnalysis* doTypeAnalysis(CFG* cfg, const OSREntryDescriptor* entry_descriptor, EffortLevel effort,
TypeAnalysis::SpeculationLevel speculation, ScopeInfo* scope_info);
}
#endif
......@@ -202,29 +202,24 @@ static bool compareBlockPairs(const std::pair<CFGBlock*, CFGBlock*>& p1, const s
return p1.first->idx < p2.first->idx;
}
static std::vector<std::pair<CFGBlock*, CFGBlock*>>
computeBlockTraversalOrder(const BlockSet& full_blocks, const BlockSet& partial_blocks, CFGBlock* start) {
static std::vector<std::pair<CFGBlock*, CFGBlock*>> computeBlockTraversalOrder(const BlockSet& blocks,
CFGBlock* start) {
std::vector<std::pair<CFGBlock*, CFGBlock*>> rtn;
std::unordered_set<CFGBlock*> in_queue;
if (start) {
assert(full_blocks.count(start));
assert(blocks.count(start));
in_queue.insert(start);
rtn.push_back(std::make_pair(start, (CFGBlock*)NULL));
}
for (CFGBlock* b : partial_blocks) {
in_queue.insert(b);
rtn.push_back(std::make_pair(b, (CFGBlock*)NULL));
}
// It's important for debugging purposes that the order is deterministic, but the iteration
// over the BlockSet is not:
std::sort(rtn.begin(), rtn.end(), compareBlockPairs);
int idx = 0;
while (rtn.size() < full_blocks.size() + partial_blocks.size()) {
while (rtn.size() < blocks.size()) {
// TODO: come up with an alternative algorithm that outputs
// the blocks in "as close to in-order as possible".
// Do this by iterating over all blocks and picking the smallest one
......@@ -234,7 +229,7 @@ computeBlockTraversalOrder(const BlockSet& full_blocks, const BlockSet& partial_
for (int i = 0; i < cur->successors.size(); i++) {
CFGBlock* b = cur->successors[i];
assert(full_blocks.count(b) || partial_blocks.count(b));
assert(blocks.count(b));
if (in_queue.count(b))
continue;
......@@ -245,11 +240,11 @@ computeBlockTraversalOrder(const BlockSet& full_blocks, const BlockSet& partial_
idx++;
}
if (rtn.size() == full_blocks.size() + partial_blocks.size())
if (rtn.size() == blocks.size())
break;
CFGBlock* best = NULL;
for (CFGBlock* b : full_blocks) {
for (CFGBlock* b : blocks) {
if (in_queue.count(b))
continue;
......@@ -268,7 +263,7 @@ computeBlockTraversalOrder(const BlockSet& full_blocks, const BlockSet& partial_
rtn.push_back(std::make_pair(best, (CFGBlock*)NULL));
}
ASSERT(rtn.size() == full_blocks.size() + partial_blocks.size(), "%ld\n", rtn.size());
ASSERT(rtn.size() == blocks.size(), "%ld\n", rtn.size());
return rtn;
}
......@@ -330,9 +325,8 @@ llvm::Value* handlePotentiallyUndefined(ConcreteCompilerVariable* is_defined_var
return phi;
}
static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_guards, const GuardList& in_guards,
TypeAnalysis* types, const OSREntryDescriptor* entry_descriptor, const BlockSet& full_blocks,
const BlockSet& partial_blocks) {
static void emitBBs(IRGenState* irstate, TypeAnalysis* types, const OSREntryDescriptor* entry_descriptor,
const BlockSet& blocks) {
SourceInfo* source = irstate->getSourceInfo();
EffortLevel effort = irstate->getEffortLevel();
CompiledFunction* cf = irstate->getCurFunction();
......@@ -340,22 +334,23 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
// llvm::MDNode* func_info = irstate->getFuncDbgInfo();
if (entry_descriptor != NULL)
assert(full_blocks.count(source->cfg->getStartingBlock()) == 0);
assert(blocks.count(source->cfg->getStartingBlock()) == 0);
// We need the entry blocks pre-allocated so that we can jump forward to them.
std::unordered_map<CFGBlock*, llvm::BasicBlock*> llvm_entry_blocks;
for (CFGBlock* block : source->cfg->blocks) {
if (partial_blocks.count(block) == 0 && full_blocks.count(block) == 0) {
if (blocks.count(block) == 0) {
llvm_entry_blocks[block] = NULL;
continue;
}
char buf[40];
snprintf(buf, 40, "%s_block%d", bb_type, block->idx);
snprintf(buf, 40, "block%d", block->idx);
llvm_entry_blocks[block] = llvm::BasicBlock::Create(g.context, buf, irstate->getLLVMFunction());
}
llvm::BasicBlock* osr_entry_block = NULL; // the function entry block, where we add the type guards
llvm::BasicBlock* osr_entry_block
= NULL; // the function entry block, where we add the type guards [no guards anymore]
llvm::BasicBlock* osr_unbox_block_end = NULL; // the block after type guards where we up/down-convert things
ConcreteSymbolTable* osr_syms = NULL; // syms after conversion
if (entry_descriptor != NULL) {
......@@ -376,10 +371,6 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
CFGBlock* target_block = entry_descriptor->backedge->target;
// Currently we AND all the type guards together and then do just a single jump;
// guard_val is the current AND'd value, or NULL if there weren't any guards
llvm::Value* guard_val = NULL;
std::vector<llvm::Value*> func_args;
for (llvm::Function::arg_iterator AI = irstate->getLLVMFunction()->arg_begin();
AI != irstate->getLLVMFunction()->arg_end(); AI++) {
......@@ -439,83 +430,8 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
v = converted->getValue();
delete converted;
} else {
ASSERT(p.second == UNKNOWN, "%s", p.second->debugName().c_str());
BoxedClass* speculated_class = NULL;
if (phi_type == INT) {
speculated_class = int_cls;
} else if (phi_type == FLOAT) {
speculated_class = float_cls;
} else if (phi_type == BOOL) {
speculated_class = bool_cls;
} else {
speculated_class = phi_type->guaranteedClass();
}
ASSERT(speculated_class, "%s", phi_type->debugName().c_str());
assert(p.first.str()[0] != '!');
// TODO cache this
InternedString is_defined_name = getIsDefinedName(p.first, source->getInternedStrings());
llvm::Value* prev_guard_val = NULL;
ConcreteCompilerVariable* is_defined_var = NULL;
if (entry_descriptor->args.count(is_defined_name)) {
// relying on the fact that we are iterating over the names in order
// and the fake names precede the real names:
assert(osr_syms->count(is_defined_name));
is_defined_var = (*osr_syms)[is_defined_name];
assert(is_defined_var->getType() == BOOL);
}
guard_val = handlePotentiallyUndefined(
is_defined_var, g.i1, osr_entry_block_end, *entry_emitter, true,
[speculated_class, guard_val, &p, from_arg](IREmitter& emitter) {
llvm::Value* type_check = ConcreteCompilerVariable(p.second, from_arg, true)
.makeClassCheck(emitter, speculated_class);
// printf("Making osr entry guard to make sure that %s is a %s (given as a
// %s)\n", p.first.c_str(),
// getNameOfClass(speculated_class)->c_str(),
// p.second->debugName().c_str());
if (guard_val) {
return emitter.getBuilder()->CreateAnd(guard_val, type_check);
} else {
return type_check;
}
},
[guard_val](IREmitter& emitter) { return guard_val ? guard_val : getConstantInt(1, g.i1); });
if (speculated_class == int_cls) {
v = handlePotentiallyUndefined(
is_defined_var, INT->llvmType(), osr_unbox_block_end, *unbox_emitter, true,
[from_arg](IREmitter& emitter) {
auto v = emitter.getBuilder()->CreateCall(g.funcs.unboxInt, from_arg);
(new ConcreteCompilerVariable(BOXED_INT, from_arg, true))->decvref(emitter);
return v;
},
[](IREmitter& emitter) { return llvm::UndefValue::get(INT->llvmType()); });
} else if (speculated_class == float_cls) {
v = handlePotentiallyUndefined(
is_defined_var, FLOAT->llvmType(), osr_unbox_block_end, *unbox_emitter, true,
[from_arg](IREmitter& emitter) {
auto v = emitter.getBuilder()->CreateCall(g.funcs.unboxFloat, from_arg);
(new ConcreteCompilerVariable(BOXED_FLOAT, from_arg, true))->decvref(emitter);
return v;
},
[](IREmitter& emitter) { return llvm::UndefValue::get(FLOAT->llvmType()); });
} else if (speculated_class == bool_cls) {
v = handlePotentiallyUndefined(
is_defined_var, BOOL->llvmType(), osr_unbox_block_end, *unbox_emitter, true,
[from_arg](IREmitter& emitter) {
auto v = emitter.getBuilder()->CreateCall(g.funcs.unboxBool, from_arg);
(new ConcreteCompilerVariable(BOXED_BOOL, from_arg, true))->decvref(emitter);
return boolFromI1(emitter, v)->getValue();
},
[](IREmitter& emitter) { return llvm::UndefValue::get(BOOL->llvmType()); });
} else {
assert(phi_type == typeFromClass(speculated_class));
v = from_arg;
}
RELEASE_ASSERT(0, "OSR'd with a %s into a type inference of a %s?\n", p.second->debugName().c_str(),
phi_type->debugName().c_str());
}
if (VERBOSITY("irgen"))
......@@ -524,18 +440,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
(*osr_syms)[p.first] = new ConcreteCompilerVariable(phi_type, v, true);
}
if (guard_val) {
// Create the guard with both branches leading to the success_bb,
// and let the deopt path change the failure case to point to the
// as-yet-unknown deopt block.
// TODO Not the best approach since if we fail to do that patching,
// the guard will just silently be ignored.
llvm::BranchInst* br
= entry_emitter->getBuilder()->CreateCondBr(guard_val, osr_unbox_block, osr_unbox_block);
out_guards.registerGuardForBlockEntry(target_block, br, *initial_syms);
} else {
entry_emitter->getBuilder()->CreateBr(osr_unbox_block);
}
entry_emitter->getBuilder()->CreateBr(osr_unbox_block);
unbox_emitter->getBuilder()->CreateBr(llvm_entry_blocks[entry_descriptor->backedge->target]);
for (const auto& p : *initial_syms) {
......@@ -561,7 +466,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
CFGBlock* initial_block = NULL;
if (entry_descriptor) {
initial_block = entry_descriptor->backedge->target;
} else if (full_blocks.count(source->cfg->getStartingBlock())) {
} else if (blocks.count(source->cfg->getStartingBlock())) {
initial_block = source->cfg->getStartingBlock();
}
......@@ -572,8 +477,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
// with a lower index value, so if the entry block is 0 then we can iterate in index
// order.
// The entry block doesn't have to be zero, so we have to calculate an allowable order here:
std::vector<std::pair<CFGBlock*, CFGBlock*>> traversal_order
= computeBlockTraversalOrder(full_blocks, partial_blocks, initial_block);
std::vector<std::pair<CFGBlock*, CFGBlock*>> traversal_order = computeBlockTraversalOrder(blocks, initial_block);
std::unordered_set<CFGBlock*> into_hax;
for (int _i = 0; _i < traversal_order.size(); _i++) {
......@@ -581,14 +485,9 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
CFGBlock* pred = traversal_order[_i].second;
if (VERBOSITY("irgen") >= 1)
printf("processing %s block %d\n", bb_type, block->idx);
printf("processing block %d\n", block->idx);
bool is_partial = false;
if (partial_blocks.count(block)) {
if (VERBOSITY("irgen") >= 1)
printf("is partial block\n");
is_partial = true;
} else if (!full_blocks.count(block)) {
if (!blocks.count(block)) {
if (VERBOSITY("irgen") >= 1)
printf("Skipping this block\n");
// created_phis[block] = NULL;
......@@ -598,23 +497,16 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
continue;
}
std::unique_ptr<IRGenerator> generator(
createIRGenerator(irstate, llvm_entry_blocks, block, types, out_guards, in_guards, is_partial));
std::unique_ptr<IRGenerator> generator(createIRGenerator(irstate, llvm_entry_blocks, block, types));
llvm::BasicBlock* entry_block_end = llvm_entry_blocks[block];
std::unique_ptr<IREmitter> emitter(createIREmitter(irstate, entry_block_end));
PHITable* phis = NULL;
if (!is_partial) {
phis = new PHITable();
created_phis[block] = phis;
}
PHITable* phis = new PHITable();
created_phis[block] = phis;
// Set initial symbol table:
if (is_partial) {
// pass
} else if (block == source->cfg->getStartingBlock()) {
if (block == source->cfg->getStartingBlock()) {
assert(entry_descriptor == NULL);
assert(strcmp("opt", bb_type) == 0);
if (ENABLE_REOPT && effort < EffortLevel::MAXIMAL && source->ast != NULL
&& source->ast->type != AST_TYPE::Module) {
......@@ -733,7 +625,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
}
} else {
assert(pred);
assert(full_blocks.count(pred) || partial_blocks.count(pred));
assert(blocks.count(pred));
if (block->predecessors.size() == 1) {
// If this block has only one predecessor, it by definition doesn't need any phi nodes.
......@@ -828,12 +720,9 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
bool this_is_osr_entry = (entry_descriptor && b == entry_descriptor->backedge->target);
const std::vector<GuardList::BlockEntryGuard*>& block_guards = in_guards.getGuardsForBlock(b);
// printf("Found %ld guards for block %p, for %p\n", block_guards.size(), b, &in_guards);
for (int j = 0; j < b->predecessors.size(); j++) {
CFGBlock* b2 = b->predecessors[j];
if (full_blocks.count(b2) == 0 && partial_blocks.count(b2) == 0)
if (blocks.count(b2) == 0)
continue;
// printf("(%d %ld) -> (%d %ld)\n", b2->idx, phi_ending_symbol_tables[b2]->size(), b->idx, phis->size());
......@@ -845,19 +734,6 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
compareKeyset(osr_syms, phis);
}
std::vector<IREmitter*> emitters;
std::vector<llvm::BasicBlock*> offramps;
for (int i = 0; i < block_guards.size(); i++) {
compareKeyset(&block_guards[i]->symbol_table, phis);
llvm::BasicBlock* off_ramp = llvm::BasicBlock::Create(g.context, "deopt_ramp", irstate->getLLVMFunction());
offramps.push_back(off_ramp);
IREmitter* emitter = createIREmitter(irstate, offramps[offramps.size() - 1]);
emitters.push_back(emitter);
block_guards[i]->branch->setSuccessor(1, off_ramp);
}
// Can't always add the phi incoming value right away, since we may have to create more
// basic blocks as part of type coercion.
// Instead, just make a record of the phi node, value, and the location of the from-BB,
......@@ -868,7 +744,7 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
llvm::PHINode* llvm_phi = it->second.second;
for (int j = 0; j < b->predecessors.size(); j++) {
CFGBlock* b2 = b->predecessors[j];
if (full_blocks.count(b2) == 0 && partial_blocks.count(b2) == 0)
if (blocks.count(b2) == 0)
continue;
ConcreteCompilerVariable* v = (*phi_ending_symbol_tables[b2])[it->first];
......@@ -892,97 +768,10 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
}
InternedString is_defined_name = getIsDefinedName(it->first, source->getInternedStrings());
for (int i = 0; i < block_guards.size(); i++) {
GuardList::BlockEntryGuard* guard = block_guards[i];
IREmitter* emitter = emitters[i];
auto is_defined_it = guard->symbol_table.find(is_defined_name);
ConcreteCompilerVariable* is_defined_var = nullptr;
if (is_defined_it != guard->symbol_table.end()) {
auto var = is_defined_it->second;
assert(var->getType() == BOOL);
is_defined_var = static_cast<ConcreteCompilerVariable*>(var);
}
CompilerVariable* unconverted = NULL;
llvm::Value* val = handlePotentiallyUndefined(
is_defined_var, it->second.first->llvmType(), offramps[i], *emitter, true,
[=, &unconverted](IREmitter& emitter) {
unconverted = guard->symbol_table[it->first];
ConcreteCompilerVariable* v;
if (unconverted->canConvertTo(it->second.first)) {
v = unconverted->makeConverted(emitter, it->second.first);
assert(v);
assert(v->isGrabbed());
} else {
// This path is for handling the case that we did no type analysis in the previous tier,
// but in this tier we know that even in the deopt branch with no speculations, that
// the type is more refined than what we got from the previous tier.
//
// We're going to blindly assume that we're right about what the type should be.
assert(unconverted->getType() == UNKNOWN);
assert(strcmp(bb_type, "deopt") == 0);
ConcreteCompilerVariable* converted = unconverted->makeConverted(emitter, UNKNOWN);
if (it->second.first->llvmType() == g.llvm_value_type_ptr) {
v = new ConcreteCompilerVariable(it->second.first, converted->getValue(), true);
} else if (it->second.first == FLOAT) {
llvm::Value* unboxed
= emitter.getBuilder()->CreateCall(g.funcs.unboxFloat, converted->getValue());
v = new ConcreteCompilerVariable(FLOAT, unboxed, true);
} else if (it->second.first == INT) {
llvm::Value* unboxed
= emitter.getBuilder()->CreateCall(g.funcs.unboxInt, converted->getValue());
v = new ConcreteCompilerVariable(INT, unboxed, true);
} else if (it->second.first == BOOL) {
llvm::Value* unboxed
= emitter.getBuilder()->CreateCall(g.funcs.unboxBool, converted->getValue());
v = boolFromI1(emitter, unboxed);
} else {
printf("%s\n", it->second.first->debugName().c_str());
abort();
}
converted->decvref(emitter);
/*
if (speculated_class == int_cls) {
v = unbox_emitter->getBuilder()->CreateCall(g.funcs.unboxInt, from_arg);
(new ConcreteCompilerVariable(BOXED_INT, from_arg, true))->decvref(*unbox_emitter);
} else if (speculated_class == float_cls) {
v = unbox_emitter->getBuilder()->CreateCall(g.funcs.unboxFloat, from_arg);
(new ConcreteCompilerVariable(BOXED_FLOAT, from_arg, true))->decvref(*unbox_emitter);
} else {
assert(phi_type == typeFromClass(speculated_class));
v = from_arg;
}
*/
}
ASSERT(it->second.first == v->getType(), "");
assert(it->second.first->llvmType() == v->getValue()->getType());
return v->getValue();
},
[=](IREmitter& emitter) { return llvm::UndefValue::get(it->second.first->llvmType()); });
phi_args.emplace_back(llvm_phi, val, offramps[i]);
// TODO not sure if this is right:
unconverted->decvref(*emitter);
}
}
for (auto t : phi_args) {
std::get<0>(t)->addIncoming(std::get<1>(t), std::get<2>(t));
}
for (int i = 0; i < block_guards.size(); i++) {
emitters[i]->getBuilder()->CreateBr(llvm_entry_blocks[b]);
delete emitters[i];
}
}
for (CFGBlock* b : source->cfg->blocks) {
......@@ -1009,23 +798,17 @@ static void emitBBs(IRGenState* irstate, const char* bb_type, GuardList& out_gua
}
}
static void computeBlockSetClosure(BlockSet& full_blocks, BlockSet& partial_blocks) {
static void computeBlockSetClosure(BlockSet& blocks) {
if (VERBOSITY("irgen") >= 1) {
printf("Initial full:");
for (CFGBlock* b : full_blocks) {
printf(" %d", b->idx);
}
printf("\n");
printf("Initial partial:");
for (CFGBlock* b : partial_blocks) {
printf("Initial:");
for (CFGBlock* b : blocks) {
printf(" %d", b->idx);
}
printf("\n");
}
std::vector<CFGBlock*> q;
BlockSet expanded;
q.insert(q.end(), full_blocks.begin(), full_blocks.end());
q.insert(q.end(), partial_blocks.begin(), partial_blocks.end());
q.insert(q.end(), blocks.begin(), blocks.end());
while (q.size()) {
CFGBlock* b = q.back();
......@@ -1037,20 +820,14 @@ static void computeBlockSetClosure(BlockSet& full_blocks, BlockSet& partial_bloc
for (int i = 0; i < b->successors.size(); i++) {
CFGBlock* b2 = b->successors[i];
partial_blocks.erase(b2);
full_blocks.insert(b2);
blocks.insert(b2);
q.push_back(b2);
}
}
if (VERBOSITY("irgen") >= 1) {
printf("Ending full:");
for (CFGBlock* b : full_blocks) {
printf(" %d", b->idx);
}
printf("\n");
printf("Ending partial:");
for (CFGBlock* b : partial_blocks) {
printf("Ending:");
for (CFGBlock* b : blocks) {
printf(" %d", b->idx);
}
printf("\n");
......@@ -1103,6 +880,8 @@ CompiledFunction* doCompile(SourceInfo* source, ParamNames* param_names, const O
Timer _t2;
long irgen_us = 0;
assert((entry_descriptor != NULL) + (spec != NULL) == 1);
if (VERBOSITY("irgen") >= 1)
source->cfg->print();
......@@ -1119,12 +898,14 @@ CompiledFunction* doCompile(SourceInfo* source, ParamNames* param_names, const O
////
// Initializing the llvm-level structures:
int nargs = param_names->totalParameters();
ASSERT(nargs == spec->arg_types.size(), "%d %ld", nargs, spec->arg_types.size());
std::vector<llvm::Type*> llvm_arg_types;
if (entry_descriptor == NULL) {
assert(spec);
int nargs = param_names->totalParameters();
ASSERT(nargs == spec->arg_types.size(), "%d %ld", nargs, spec->arg_types.size());
if (source->getScopeInfo()->takesClosure())
llvm_arg_types.push_back(g.llvm_closure_type_ptr);
......@@ -1152,13 +933,17 @@ CompiledFunction* doCompile(SourceInfo* source, ParamNames* param_names, const O
}
}
llvm::FunctionType* ft = llvm::FunctionType::get(spec->rtn_type->llvmType(), llvm_arg_types, false /*vararg*/);
CompiledFunction* cf
= new CompiledFunction(NULL, spec, (effort == EffortLevel::INTERPRETED), NULL, NULL, effort, entry_descriptor);
llvm::FunctionType* ft = llvm::FunctionType::get(cf->getReturnType()->llvmType(), llvm_arg_types, false /*vararg*/);
llvm::Function* f = llvm::Function::Create(ft, llvm::Function::ExternalLinkage, name, g.cur_module);
// g.func_registry.registerFunction(f, g.cur_module);
CompiledFunction* cf
= new CompiledFunction(f, spec, (effort == EffortLevel::INTERPRETED), NULL, NULL, effort, entry_descriptor);
cf->func = f;
// g.func_registry.registerFunction(f, g.cur_module);
llvm::MDNode* dbg_funcinfo = setupDebugInfo(source, f, nameprefix);
......@@ -1168,61 +953,32 @@ CompiledFunction* doCompile(SourceInfo* source, ParamNames* param_names, const O
EffortLevel min_speculation_level = EffortLevel::MAXIMAL;
if (ENABLE_SPECULATION && effort >= min_speculation_level)
speculation_level = TypeAnalysis::SOME;
TypeAnalysis* types
= doTypeAnalysis(source->cfg, *param_names, spec->arg_types, effort, speculation_level, source->getScopeInfo());
TypeAnalysis* types;
if (entry_descriptor)
types = doTypeAnalysis(source->cfg, entry_descriptor, effort, speculation_level, source->getScopeInfo());
else
types = doTypeAnalysis(source->cfg, *param_names, spec->arg_types, effort, speculation_level,
source->getScopeInfo());
_t2.split();
GuardList guards;
_t2.split();
BlockSet full_blocks, partial_blocks;
BlockSet blocks;
if (entry_descriptor == NULL) {
for (CFGBlock* b : source->cfg->blocks) {
full_blocks.insert(b);
blocks.insert(b);
}
} else {
full_blocks.insert(entry_descriptor->backedge->target);
computeBlockSetClosure(full_blocks, partial_blocks);
blocks.insert(entry_descriptor->backedge->target);
computeBlockSetClosure(blocks);
}
IRGenState irstate(cf, source, param_names, getGCBuilder(), dbg_funcinfo);
emitBBs(&irstate, "opt", guards, GuardList(), types, entry_descriptor, full_blocks, partial_blocks);
emitBBs(&irstate, types, entry_descriptor, blocks);
// De-opt handling:
if (!guards.isEmpty()) {
BlockSet deopt_full_blocks, deopt_partial_blocks;
GuardList deopt_guards;
// typedef std::unordered_map<CFGBlock*, std::unordered_map<AST_expr*, GuardList::ExprTypeGuard*> > Worklist;
// Worklist guard_worklist;
guards.getBlocksWithGuards(deopt_full_blocks);
for (const auto& p : guards.exprGuards()) {
deopt_partial_blocks.insert(p.second->cfg_block);
}
computeBlockSetClosure(deopt_full_blocks, deopt_partial_blocks);
assert(deopt_full_blocks.size() || deopt_partial_blocks.size());
irgen_us += _t2.split();
TypeAnalysis* deopt_types = doTypeAnalysis(source->cfg, *param_names, spec->arg_types, effort,
TypeAnalysis::NONE, source->getScopeInfo());
_t2.split();
emitBBs(&irstate, "deopt", deopt_guards, guards, deopt_types, NULL, deopt_full_blocks, deopt_partial_blocks);
assert(deopt_guards.isEmpty());
deopt_guards.assertGotPatched();
delete deopt_types;
}
guards.assertGotPatched();
for (const auto& p : guards.exprGuards()) {
delete p.second;
}
delete types;
if (VERBOSITY("irgen") >= 1) {
......
......@@ -166,9 +166,10 @@ static void compileIR(CompiledFunction* cf, EffortLevel effort) {
// should only be called after checking to see if the other versions would work.
// The codegen_lock needs to be held in W mode before calling this function:
CompiledFunction* compileFunction(CLFunction* f, FunctionSpecialization* spec, EffortLevel effort,
const OSREntryDescriptor* entry) {
const OSREntryDescriptor* entry_descriptor) {
Timer _t("for compileFunction()");
assert(spec);
assert((entry_descriptor != NULL) + (spec != NULL) == 1);
ASSERT(f->versions.size() < 20, "%ld", f->versions.size());
SourceInfo* source = f->source;
......@@ -180,21 +181,21 @@ CompiledFunction* compileFunction(CLFunction* f, FunctionSpecialization* spec, E
std::string s;
llvm::raw_string_ostream ss(s);
ss << "\033[34;1mJIT'ing " << name << " with signature (";
for (int i = 0; i < spec->arg_types.size(); i++) {
if (i > 0)
ss << ", ";
ss << spec->arg_types[i]->debugName();
// spec->arg_types[i]->llvmType()->print(ss);
if (spec) {
ss << "\033[34;1mJIT'ing " << name << " with signature (";
for (int i = 0; i < spec->arg_types.size(); i++) {
if (i > 0)
ss << ", ";
ss << spec->arg_types[i]->debugName();
// spec->arg_types[i]->llvmType()->print(ss);
}
ss << ") -> ";
ss << spec->rtn_type->debugName();
} else {
ss << "\nDoing OSR-entry partial compile of " << name << ", starting with backedge to block "
<< entry_descriptor->backedge->target->idx << '\n';
}
ss << ") -> ";
ss << spec->rtn_type->debugName();
// spec->rtn_type->llvmType()->print(ss);
ss << " at effort level " << (int)effort;
if (entry != NULL) {
ss << "\nDoing OSR-entry partial compile, starting with backedge to block " << entry->backedge->target->idx
<< '\n';
}
ss << "\033[0m";
printf("%s\n", ss.str().c_str());
}
......@@ -216,9 +217,10 @@ CompiledFunction* compileFunction(CLFunction* f, FunctionSpecialization* spec, E
CompiledFunction* cf = 0;
if (effort == EffortLevel::INTERPRETED) {
assert(!entry_descriptor);
cf = new CompiledFunction(0, spec, true, NULL, NULL, effort, 0);
} else {
cf = doCompile(source, &f->param_names, entry, effort, spec, name);
cf = doCompile(source, &f->param_names, entry_descriptor, effort, spec, name);
compileIR(cf, effort);
}
......@@ -327,6 +329,12 @@ void CompiledFunction::speculationFailed() {
}
}
ConcreteCompilerType* CompiledFunction::getReturnType() {
if (spec)
return spec->rtn_type;
return entry_descriptor->cf->getReturnType();
}
/// Reoptimizes the given function version at the new effort level.
/// The cf must be an active version in its parents CLFunction; the given
/// version will be replaced by the new version, which will be returned.
......@@ -385,8 +393,7 @@ CompiledFunction* compilePartialFuncInternal(OSRExit* exit) {
EffortLevel new_effort = EffortLevel::MAXIMAL;
if (exit->parent_cf->effort == EffortLevel::INTERPRETED)
new_effort = EffortLevel::MINIMAL;
CompiledFunction* compiled
= compileFunction(exit->parent_cf->clfunc, exit->parent_cf->spec, new_effort, exit->entry);
CompiledFunction* compiled = compileFunction(exit->parent_cf->clfunc, NULL, new_effort, exit->entry);
assert(compiled == new_cf);
stat_osr_compiles.log();
......
......@@ -105,26 +105,6 @@ ScopeInfo* IRGenState::getScopeInfoForNode(AST* node) {
return source->scoping->getScopeInfoForNode(node);
}
GuardList::ExprTypeGuard::ExprTypeGuard(CFGBlock* cfg_block, llvm::BranchInst* branch, AST_expr* ast_node,
CompilerVariable* val, const SymbolTable& st)
: cfg_block(cfg_block), branch(branch), ast_node(ast_node) {
DupCache cache;
this->val = val->dup(cache);
for (const auto& p : st) {
this->st[p.first] = p.second->dup(cache);
}
}
GuardList::BlockEntryGuard::BlockEntryGuard(CFGBlock* cfg_block, llvm::BranchInst* branch,
const SymbolTable& symbol_table)
: cfg_block(cfg_block), branch(branch) {
DupCache cache;
for (const auto& p : symbol_table) {
this->symbol_table[p.first] = p.second->dup(cache);
}
}
class IREmitterImpl : public IREmitter {
private:
IRGenState* irstate;
......@@ -308,11 +288,8 @@ private:
std::unordered_map<CFGBlock*, llvm::BasicBlock*>& entry_blocks;
CFGBlock* myblock;
TypeAnalysis* types;
GuardList& out_guards;
const GuardList& in_guards;
enum State {
PARTIAL, // running through a partial block, waiting to hit the first in_guard
RUNNING, // normal
DEAD, // passed a Return statement; still syntactically valid but the code should not be compiled
FINISHED, // passed a pseudo-node such as Branch or Jump; internal error if there are any more statements
......@@ -320,11 +297,9 @@ private:
public:
IRGeneratorImpl(IRGenState* irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*>& entry_blocks,
CFGBlock* myblock, TypeAnalysis* types, GuardList& out_guards, const GuardList& in_guards,
bool is_partial)
CFGBlock* myblock, TypeAnalysis* types)
: irstate(irstate), curblock(entry_blocks[myblock]), emitter(irstate, curblock, this),
entry_blocks(entry_blocks), myblock(myblock), types(types), out_guards(out_guards), in_guards(in_guards),
state(is_partial ? PARTIAL : RUNNING) {}
entry_blocks(entry_blocks), myblock(myblock), types(types), state(RUNNING) {}
~IRGeneratorImpl() { delete emitter.getBuilder(); }
......@@ -386,8 +361,6 @@ private:
}
CompilerVariable* evalAttribute(AST_Attribute* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* value = evalExpr(node->value, unw_info);
CompilerVariable* rtn = value->getattr(emitter, getOpInfoForNode(node, unw_info), &node->attr.str(), false);
......@@ -396,8 +369,6 @@ private:
}
CompilerVariable* evalClsAttribute(AST_ClsAttribute* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* value = evalExpr(node->value, unw_info);
CompilerVariable* rtn = value->getattr(emitter, getOpInfoForNode(node, unw_info), &node->attr.str(), true);
value->decvref(emitter);
......@@ -661,8 +632,6 @@ private:
CompilerVariable* _evalBinExp(AST* node, CompilerVariable* left, CompilerVariable* right, AST_TYPE::AST_TYPE type,
BinExpType exp_type, UnwindInfo unw_info) {
assert(state != PARTIAL);
assert(left);
assert(right);
......@@ -674,8 +643,6 @@ private:
}
CompilerVariable* evalBinOp(AST_BinOp* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* left = evalExpr(node->left, unw_info);
CompilerVariable* right = evalExpr(node->right, unw_info);
......@@ -688,8 +655,6 @@ private:
}
CompilerVariable* evalAugBinOp(AST_AugBinOp* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* left = evalExpr(node->left, unw_info);
CompilerVariable* right = evalExpr(node->right, unw_info);
......@@ -702,8 +667,6 @@ private:
}
CompilerVariable* evalCompare(AST_Compare* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
RELEASE_ASSERT(node->ops.size() == 1, "");
CompilerVariable* left = evalExpr(node->left, unw_info);
......@@ -719,8 +682,6 @@ private:
}
CompilerVariable* evalCall(AST_Call* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
bool is_callattr;
bool callattr_clsonly = false;
const std::string* attr = NULL;
......@@ -798,8 +759,6 @@ private:
}
CompilerVariable* evalDict(AST_Dict* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
llvm::Value* v = emitter.getBuilder()->CreateCall(g.funcs.createDict);
ConcreteCompilerVariable* rtn = new ConcreteCompilerVariable(DICT, v, true);
if (node->keys.size()) {
......@@ -834,15 +793,9 @@ private:
inst->setMetadata(message, mdnode);
}
CompilerVariable* evalIndex(AST_Index* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
return evalExpr(node->value, unw_info);
}
CompilerVariable* evalIndex(AST_Index* node, UnwindInfo unw_info) { return evalExpr(node->value, unw_info); }
CompilerVariable* evalLambda(AST_Lambda* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
AST_Return* expr = new AST_Return();
expr->value = node->body;
......@@ -856,8 +809,6 @@ private:
CompilerVariable* evalList(AST_List* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
std::vector<CompilerVariable*> elts;
for (int i = 0; i < node->elts.size(); i++) {
CompilerVariable* value = evalExpr(node->elts[i], unw_info);
......@@ -911,8 +862,6 @@ private:
}
CompilerVariable* evalName(AST_Name* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
auto scope_info = irstate->getScopeInfo();
bool is_kill = irstate->getSourceInfo()->liveness->isKill(node, myblock);
......@@ -983,8 +932,6 @@ private:
}
CompilerVariable* evalNum(AST_Num* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
if (node->num_type == AST_Num::INT)
return makeInt(node->n_int);
else if (node->num_type == AST_Num::FLOAT)
......@@ -996,8 +943,6 @@ private:
}
CompilerVariable* evalRepr(AST_Repr* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* var = evalExpr(node->value, unw_info);
ConcreteCompilerVariable* cvar = var->makeConverted(emitter, var->getBoxType());
var->decvref(emitter);
......@@ -1011,8 +956,6 @@ private:
}
CompilerVariable* evalSet(AST_Set* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
std::vector<CompilerVariable*> elts;
for (int i = 0; i < node->elts.size(); i++) {
CompilerVariable* value = evalExpr(node->elts[i], unw_info);
......@@ -1037,8 +980,6 @@ private:
}
CompilerVariable* evalSlice(AST_Slice* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* start, *stop, *step;
start = node->lower ? evalExpr(node->lower, unw_info) : getNone();
stop = node->upper ? evalExpr(node->upper, unw_info) : getNone();
......@@ -1064,15 +1005,9 @@ private:
return new ConcreteCompilerVariable(SLICE, rtn, true);
}
CompilerVariable* evalStr(AST_Str* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
return makeStr(&node->s);
}
CompilerVariable* evalStr(AST_Str* node, UnwindInfo unw_info) { return makeStr(&node->s); }
CompilerVariable* evalSubscript(AST_Subscript* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* value = evalExpr(node->value, unw_info);
CompilerVariable* slice = evalExpr(node->slice, unw_info);
......@@ -1083,8 +1018,6 @@ private:
}
CompilerVariable* evalTuple(AST_Tuple* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
std::vector<CompilerVariable*> elts;
for (int i = 0; i < node->elts.size(); i++) {
CompilerVariable* value = evalExpr(node->elts[i], unw_info);
......@@ -1100,8 +1033,6 @@ private:
}
CompilerVariable* evalUnaryOp(AST_UnaryOp* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* operand = evalExpr(node->operand, unw_info);
if (node->op_type == AST_TYPE::Not) {
......@@ -1129,8 +1060,6 @@ private:
}
CompilerVariable* evalYield(AST_Yield* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* generator = _getFake(internString(PASSED_GENERATOR_NAME), false);
ConcreteCompilerVariable* convertedGenerator = generator->makeConverted(emitter, generator->getBoxType());
......@@ -1148,8 +1077,6 @@ private:
}
ConcreteCompilerVariable* unboxVar(ConcreteCompilerType* t, llvm::Value* v, bool grabbed) {
assert(state != PARTIAL);
if (t == BOXED_INT) {
llvm::Value* unboxed = emitter.getBuilder()->CreateCall(g.funcs.unboxInt, v);
ConcreteCompilerVariable* rtn = new ConcreteCompilerVariable(INT, unboxed, true);
......@@ -1175,205 +1102,107 @@ private:
}
CompilerVariable* rtn = NULL;
if (state != PARTIAL) {
switch (node->type) {
case AST_TYPE::Attribute:
rtn = evalAttribute(ast_cast<AST_Attribute>(node), unw_info);
break;
case AST_TYPE::AugBinOp:
rtn = evalAugBinOp(ast_cast<AST_AugBinOp>(node), unw_info);
break;
case AST_TYPE::BinOp:
rtn = evalBinOp(ast_cast<AST_BinOp>(node), unw_info);
break;
case AST_TYPE::Call:
rtn = evalCall(ast_cast<AST_Call>(node), unw_info);
break;
case AST_TYPE::Compare:
rtn = evalCompare(ast_cast<AST_Compare>(node), unw_info);
break;
case AST_TYPE::Dict:
rtn = evalDict(ast_cast<AST_Dict>(node), unw_info);
break;
case AST_TYPE::Index:
rtn = evalIndex(ast_cast<AST_Index>(node), unw_info);
break;
case AST_TYPE::Lambda:
rtn = evalLambda(ast_cast<AST_Lambda>(node), unw_info);
break;
case AST_TYPE::List:
rtn = evalList(ast_cast<AST_List>(node), unw_info);
break;
case AST_TYPE::Name:
rtn = evalName(ast_cast<AST_Name>(node), unw_info);
break;
case AST_TYPE::Num:
rtn = evalNum(ast_cast<AST_Num>(node), unw_info);
break;
case AST_TYPE::Repr:
rtn = evalRepr(ast_cast<AST_Repr>(node), unw_info);
break;
case AST_TYPE::Set:
rtn = evalSet(ast_cast<AST_Set>(node), unw_info);
break;
case AST_TYPE::Slice:
rtn = evalSlice(ast_cast<AST_Slice>(node), unw_info);
break;
case AST_TYPE::Str:
rtn = evalStr(ast_cast<AST_Str>(node), unw_info);
break;
case AST_TYPE::Subscript:
rtn = evalSubscript(ast_cast<AST_Subscript>(node), unw_info);
break;
case AST_TYPE::Tuple:
rtn = evalTuple(ast_cast<AST_Tuple>(node), unw_info);
break;
case AST_TYPE::UnaryOp:
rtn = evalUnaryOp(ast_cast<AST_UnaryOp>(node), unw_info);
break;
case AST_TYPE::Yield:
rtn = evalYield(ast_cast<AST_Yield>(node), unw_info);
break;
case AST_TYPE::ClsAttribute:
rtn = evalClsAttribute(ast_cast<AST_ClsAttribute>(node), unw_info);
break;
case AST_TYPE::LangPrimitive:
rtn = evalLangPrimitive(ast_cast<AST_LangPrimitive>(node), unw_info);
break;
default:
printf("Unhandled expr type: %d (irgenerator.cpp:" STRINGIFY(__LINE__) ")\n", node->type);
exit(1);
}
assert(rtn);
// Out-guarding:
BoxedClass* speculated_class = types->speculatedExprClass(node);
if (speculated_class != NULL) {
assert(rtn);
ConcreteCompilerType* speculated_type = typeFromClass(speculated_class);
if (VERBOSITY("irgen") >= 1) {
printf("Speculating that %s is actually %s, at ", rtn->getConcreteType()->debugName().c_str(),
speculated_type->debugName().c_str());
PrintVisitor printer;
node->accept(&printer);
printf("\n");
}
// That's not really a speculation.... could potentially handle this here, but
// I think it's better to just not generate bad speculations:
assert(!rtn->canConvertTo(speculated_type));
switch (node->type) {
case AST_TYPE::Attribute:
rtn = evalAttribute(ast_cast<AST_Attribute>(node), unw_info);
break;
case AST_TYPE::AugBinOp:
rtn = evalAugBinOp(ast_cast<AST_AugBinOp>(node), unw_info);
break;
case AST_TYPE::BinOp:
rtn = evalBinOp(ast_cast<AST_BinOp>(node), unw_info);
break;
case AST_TYPE::Call:
rtn = evalCall(ast_cast<AST_Call>(node), unw_info);
break;
case AST_TYPE::Compare:
rtn = evalCompare(ast_cast<AST_Compare>(node), unw_info);
break;
case AST_TYPE::Dict:
rtn = evalDict(ast_cast<AST_Dict>(node), unw_info);
break;
case AST_TYPE::Index:
rtn = evalIndex(ast_cast<AST_Index>(node), unw_info);
break;
case AST_TYPE::Lambda:
rtn = evalLambda(ast_cast<AST_Lambda>(node), unw_info);
break;
case AST_TYPE::List:
rtn = evalList(ast_cast<AST_List>(node), unw_info);
break;
case AST_TYPE::Name:
rtn = evalName(ast_cast<AST_Name>(node), unw_info);
break;
case AST_TYPE::Num:
rtn = evalNum(ast_cast<AST_Num>(node), unw_info);
break;
case AST_TYPE::Repr:
rtn = evalRepr(ast_cast<AST_Repr>(node), unw_info);
break;
case AST_TYPE::Set:
rtn = evalSet(ast_cast<AST_Set>(node), unw_info);
break;
case AST_TYPE::Slice:
rtn = evalSlice(ast_cast<AST_Slice>(node), unw_info);
break;
case AST_TYPE::Str:
rtn = evalStr(ast_cast<AST_Str>(node), unw_info);
break;
case AST_TYPE::Subscript:
rtn = evalSubscript(ast_cast<AST_Subscript>(node), unw_info);
break;
case AST_TYPE::Tuple:
rtn = evalTuple(ast_cast<AST_Tuple>(node), unw_info);
break;
case AST_TYPE::UnaryOp:
rtn = evalUnaryOp(ast_cast<AST_UnaryOp>(node), unw_info);
break;
case AST_TYPE::Yield:
rtn = evalYield(ast_cast<AST_Yield>(node), unw_info);
break;
ConcreteCompilerVariable* old_rtn = rtn->makeConverted(emitter, UNKNOWN);
rtn->decvref(emitter);
case AST_TYPE::ClsAttribute:
rtn = evalClsAttribute(ast_cast<AST_ClsAttribute>(node), unw_info);
break;
case AST_TYPE::LangPrimitive:
rtn = evalLangPrimitive(ast_cast<AST_LangPrimitive>(node), unw_info);
break;
default:
printf("Unhandled expr type: %d (irgenerator.cpp:" STRINGIFY(__LINE__) ")\n", node->type);
exit(1);
}
llvm::Value* guard_check = old_rtn->makeClassCheck(emitter, speculated_class);
assert(guard_check->getType() == g.i1);
createExprTypeGuard(guard_check, node, old_rtn->getValue(), unw_info.current_stmt);
assert(rtn);
rtn = unboxVar(speculated_type, old_rtn->getValue(), true);
}
}
// Out-guarding:
BoxedClass* speculated_class = types->speculatedExprClass(node);
if (speculated_class != NULL) {
assert(rtn);
// In-guarding:
GuardList::ExprTypeGuard* guard = in_guards.getNodeTypeGuard(node);
if (guard != NULL) {
ConcreteCompilerType* speculated_type = typeFromClass(speculated_class);
if (VERBOSITY("irgen") >= 1) {
printf("merging guard after ");
printf("Speculating that %s is actually %s, at ", rtn->getConcreteType()->debugName().c_str(),
speculated_type->debugName().c_str());
PrintVisitor printer;
node->accept(&printer);
printf("; is_partial=%d\n", state == PARTIAL);
printf("\n");
}
if (state == PARTIAL) {
guard->branch->setSuccessor(1, curblock);
symbol_table = SymbolTable(guard->st);
assert(guard->val);
state = RUNNING;
return guard->val;
} else {
assert(state == RUNNING);
compareKeyset(&symbol_table, &guard->st);
assert(symbol_table.size() == guard->st.size());
llvm::BasicBlock* ramp_block
= llvm::BasicBlock::Create(g.context, "deopt_ramp", irstate->getLLVMFunction());
llvm::BasicBlock* join_block
= llvm::BasicBlock::Create(g.context, "deopt_join", irstate->getLLVMFunction());
SymbolTable joined_st;
for (const auto& p : guard->st) {
// if (VERBOSITY("irgen") >= 1) printf("merging %s\n", p.first.c_str());
CompilerVariable* curval = symbol_table[p.first];
// I'm not sure this is necessary or even correct:
// ASSERT(curval->getVrefs() == p.second->getVrefs(), "%s %d %d", p.first.c_str(),
// curval->getVrefs(), p.second->getVrefs());
ConcreteCompilerType* merged_type = curval->getConcreteType();
emitter.getBuilder()->SetInsertPoint(ramp_block);
ConcreteCompilerVariable* converted1 = p.second->makeConverted(emitter, merged_type);
p.second->decvref(emitter); // for makeconverted
// guard->st[p.first] = converted;
// p.second->decvref(emitter); // for the replaced version
emitter.getBuilder()->SetInsertPoint(curblock);
ConcreteCompilerVariable* converted2 = curval->makeConverted(emitter, merged_type);
curval->decvref(emitter); // for makeconverted
// symbol_table[p.first] = converted;
// curval->decvref(emitter); // for the replaced version
if (converted1->getValue() == converted2->getValue()) {
joined_st[p.first] = new ConcreteCompilerVariable(merged_type, converted1->getValue(), true);
} else {
emitter.getBuilder()->SetInsertPoint(join_block);
llvm::PHINode* phi = emitter.getBuilder()->CreatePHI(merged_type->llvmType(), 2, p.first.str());
phi->addIncoming(converted1->getValue(), ramp_block);
phi->addIncoming(converted2->getValue(), curblock);
joined_st[p.first] = new ConcreteCompilerVariable(merged_type, phi, true);
}
// TODO free dead Variable objects!
}
symbol_table = joined_st;
emitter.getBuilder()->SetInsertPoint(curblock);
emitter.getBuilder()->CreateBr(join_block);
emitter.getBuilder()->SetInsertPoint(ramp_block);
emitter.getBuilder()->CreateBr(join_block);
guard->branch->setSuccessor(1, ramp_block);
{
ConcreteCompilerType* this_merged_type = rtn->getConcreteType();
// That's not really a speculation.... could potentially handle this here, but
// I think it's better to just not generate bad speculations:
assert(!rtn->canConvertTo(speculated_type));
emitter.getBuilder()->SetInsertPoint(ramp_block);
ConcreteCompilerVariable* converted_guard_rtn
= guard->val->makeConverted(emitter, this_merged_type);
guard->val->decvref(emitter);
emitter.getBuilder()->SetInsertPoint(curblock);
ConcreteCompilerVariable* converted_rtn = rtn->makeConverted(emitter, this_merged_type);
rtn->decvref(emitter);
emitter.getBuilder()->SetInsertPoint(join_block);
llvm::PHINode* this_phi = emitter.getBuilder()->CreatePHI(this_merged_type->llvmType(), 2);
this_phi->addIncoming(converted_rtn->getValue(), curblock);
this_phi->addIncoming(converted_guard_rtn->getValue(), ramp_block);
rtn = new ConcreteCompilerVariable(this_merged_type, this_phi, true);
ConcreteCompilerVariable* old_rtn = rtn->makeConverted(emitter, UNKNOWN);
rtn->decvref(emitter);
// TODO free dead Variable objects!
}
llvm::Value* guard_check = old_rtn->makeClassCheck(emitter, speculated_class);
assert(guard_check->getType() == g.i1);
createExprTypeGuard(guard_check, node, old_rtn->getValue(), unw_info.current_stmt);
curblock = join_block;
emitter.getBuilder()->SetInsertPoint(curblock);
}
rtn = unboxVar(speculated_type, old_rtn->getValue(), true);
}
assert(rtn || state == PARTIAL);
assert(rtn);
return rtn;
}
......@@ -1440,14 +1269,12 @@ private:
}
void _doSetattr(AST_Attribute* target, CompilerVariable* val, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* t = evalExpr(target->value, unw_info);
t->setattr(emitter, getEmptyOpInfo(unw_info), &target->attr.str(), val);
t->decvref(emitter);
}
void _doSetitem(AST_Subscript* target, CompilerVariable* val, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* tget = evalExpr(target->value, unw_info);
CompilerVariable* slice = evalExpr(target->slice, unw_info);
......@@ -1482,7 +1309,6 @@ private:
}
void _doUnpackTuple(AST_Tuple* target, CompilerVariable* val, UnwindInfo unw_info) {
assert(state != PARTIAL);
int ntargets = target->elts.size();
std::vector<CompilerVariable*> unpacked = val->unpack(emitter, getOpInfoForNode(target, unw_info), ntargets);
......@@ -1502,7 +1328,6 @@ private:
}
void _doSet(AST* target, CompilerVariable* val, UnwindInfo unw_info) {
assert(state != PARTIAL);
switch (target->type) {
case AST_TYPE::Attribute:
_doSetattr(ast_cast<AST_Attribute>(target), val, unw_info);
......@@ -1547,8 +1372,6 @@ private:
void doAssign(AST_Assign* node, UnwindInfo unw_info) {
CompilerVariable* val = evalExpr(node->value, unw_info);
if (state == PARTIAL)
return;
for (int i = 0; i < node->targets.size(); i++) {
_doSet(node->targets[i], val, unw_info);
......@@ -1557,9 +1380,6 @@ private:
}
void doClassDef(AST_ClassDef* node, UnwindInfo unw_info) {
if (state == PARTIAL)
return;
assert(node->type == AST_TYPE::ClassDef);
ScopeInfo* scope_info = irstate->getScopeInfoForNode(node);
assert(scope_info);
......@@ -1623,7 +1443,6 @@ private:
}
void doDelete(AST_Delete* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
for (AST_expr* target : node->targets) {
switch (target->type) {
case AST_TYPE::Subscript:
......@@ -1644,7 +1463,6 @@ private:
// invoke delitem in objmodel.cpp, which will invoke the listDelitem of list
void _doDelitem(AST_Subscript* target, UnwindInfo unw_info) {
assert(state != PARTIAL);
CompilerVariable* tget = evalExpr(target->value, unw_info);
CompilerVariable* slice = evalExpr(target->slice, unw_info);
......@@ -1763,9 +1581,6 @@ private:
}
void doFunctionDef(AST_FunctionDef* node, UnwindInfo unw_info) {
if (state == PARTIAL)
return;
std::vector<CompilerVariable*> decorators;
for (auto d : node->decorator_list) {
decorators.push_back(evalExpr(d, unw_info));
......@@ -1783,9 +1598,6 @@ private:
}
void doPrint(AST_Print* node, UnwindInfo unw_info) {
if (state == PARTIAL)
return;
ConcreteCompilerVariable* dest = NULL;
if (node->dest) {
auto d = evalExpr(node->dest, unw_info);
......@@ -1870,7 +1682,6 @@ private:
} else {
val = evalExpr(node->value, unw_info);
}
assert(state != PARTIAL);
assert(val);
// If we ask the return variable to become UNKNOWN (the typical return type),
......@@ -1907,7 +1718,6 @@ private:
assert(node->iffalse->idx > myblock->idx);
CompilerVariable* val = evalExpr(node->test, unw_info);
assert(state != PARTIAL);
assert(val);
// We could call nonzero here if there is no try-catch block?
......@@ -1926,15 +1736,11 @@ private:
void doExpr(AST_Expr* node, UnwindInfo unw_info) {
CompilerVariable* var = evalExpr(node->value, unw_info);
if (state == PARTIAL)
return;
var->decvref(emitter);
}
void doOSRExit(llvm::BasicBlock* normal_target, AST_Jump* osr_key) {
assert(state != PARTIAL);
llvm::BasicBlock* starting_block = curblock;
llvm::BasicBlock* onramp = llvm::BasicBlock::Create(g.context, "onramp", irstate->getLLVMFunction());
......@@ -2087,8 +1893,6 @@ private:
}
void doJump(AST_Jump* node, UnwindInfo unw_info) {
assert(state != PARTIAL);
endBlock(FINISHED);
llvm::BasicBlock* target = entry_blocks[node->target];
......@@ -2533,9 +2337,8 @@ public:
};
IRGenerator* createIRGenerator(IRGenState* irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*>& entry_blocks,
CFGBlock* myblock, TypeAnalysis* types, GuardList& out_guards,
const GuardList& in_guards, bool is_partial) {
return new IRGeneratorImpl(irstate, entry_blocks, myblock, types, out_guards, in_guards, is_partial);
CFGBlock* myblock, TypeAnalysis* types) {
return new IRGeneratorImpl(irstate, entry_blocks, myblock, types);
}
CLFunction* wrapFunction(AST* node, AST_arguments* args, const std::vector<AST_stmt*>& body, SourceInfo* source) {
......
......@@ -83,10 +83,7 @@ public:
llvm::Value* getScratchSpace(int min_bytes);
llvm::Value* getFrameInfoVar();
ConcreteCompilerType* getReturnType() {
assert(cf->spec);
return cf->spec->rtn_type;
}
ConcreteCompilerType* getReturnType() { return cf->getReturnType(); }
SourceInfo* getSourceInfo() { return source_info; }
......@@ -98,94 +95,6 @@ public:
ParamNames* getParamNames() { return param_names; }
};
// Bookkeeping for speculation guards emitted during IR generation.
// Two kinds of guards are tracked:
//   - ExprTypeGuard: a guard on the runtime class of a single expression's value,
//     keyed by AST node (at most one per node).
//   - BlockEntryGuard: a guard placed at the entry of a CFG block, keyed by block
//     (a block may accumulate several).
// Each guard records the conditional branch whose failure successor must later be
// patched to point at the deopt/recompile path; assertGotPatched() verifies that
// patching happened.
// NOTE(review): this appears to be legacy machinery from the old deopt system
// (addExprTypeGuard unconditionally aborts) — confirm whether it is still live.
class GuardList {
public:
    // Guard on the concrete type of one expression's value.
    struct ExprTypeGuard {
        CFGBlock* cfg_block;        // block the guarded expression was generated in
        llvm::BranchInst* branch;   // conditional branch; successor 1 is patched later
        AST_expr* ast_node;         // expression this guard speculates about
        CompilerVariable* val;      // deep copy of the guarded value (owned)
        SymbolTable st;             // deep copy of the symbol table at the guard point
        ExprTypeGuard(CFGBlock* cfg_block, llvm::BranchInst* branch, AST_expr* ast_node, CompilerVariable* val,
                      const SymbolTable& st);
    };
    // Guard placed at the entry of a CFG block.
    struct BlockEntryGuard {
        CFGBlock* cfg_block;        // block whose entry is guarded
        llvm::BranchInst* branch;   // conditional branch; successors diverge once patched
        SymbolTable symbol_table;   // deep copy of the symbol table at block entry
        BlockEntryGuard(CFGBlock* cfg_block, llvm::BranchInst* branch, const SymbolTable& symbol_table);
    };
private:
    // At most one expression-type guard per AST node.
    std::unordered_map<AST_expr*, ExprTypeGuard*> expr_type_guards;
    // A block may carry any number of entry guards.
    std::unordered_map<CFGBlock*, std::vector<BlockEntryGuard*>> block_begin_guards;
    // typedef std::unordered_map<AST_expr*, ExprTypeGuard*>::iterator expr_type_guard_iterator;
    // typedef std::unordered_map<AST_expr*, ExprTypeGuard*>::const_iterator expr_type_guard_const_iterator;
    typedef decltype(expr_type_guards)::iterator expr_type_guard_iterator;
    typedef decltype(expr_type_guards)::const_iterator expr_type_guard_const_iterator;
public:
    // Iterate over all (AST node -> ExprTypeGuard*) pairs.
    llvm::iterator_range<expr_type_guard_iterator> exprGuards() {
        return llvm::iterator_range<expr_type_guard_iterator>(expr_type_guards.begin(), expr_type_guards.end());
    }
    // Adds every block that has at least one entry guard into `add_to`.
    void getBlocksWithGuards(std::unordered_set<CFGBlock*>& add_to) {
        for (const auto& p : block_begin_guards) {
            add_to.insert(p.first);
        }
    }
    // Debug-only check: every guard branch must have had its failure successor
    // patched (i.e. its two successors must now differ).
    void assertGotPatched() {
#ifndef NDEBUG
        for (const auto& p : block_begin_guards) {
            for (const auto g : p.second) {
                assert(g->branch->getSuccessor(0) != g->branch->getSuccessor(1));
            }
        }
        for (const auto& p : expr_type_guards) {
            assert(p.second->branch->getSuccessor(0) != p.second->branch->getSuccessor(1));
        }
#endif
    }
    // Returns the type guard registered for `node`, or NULL if none exists.
    ExprTypeGuard* getNodeTypeGuard(AST_expr* node) const {
        expr_type_guard_const_iterator it = expr_type_guards.find(node);
        if (it == expr_type_guards.end())
            return NULL;
        return it->second;
    }
    // True when no guards of either kind have been registered.
    bool isEmpty() const { return expr_type_guards.size() == 0 && block_begin_guards.size() == 0; }
    // Registers a type guard for `ast_node`.  Currently aborts unconditionally;
    // the code after abort() is unreachable (kept, presumably, for reference).
    void addExprTypeGuard(CFGBlock* cfg_block, llvm::BranchInst* branch, AST_expr* ast_node, CompilerVariable* val,
                          const SymbolTable& st) {
        abort();
        ExprTypeGuard*& g = expr_type_guards[ast_node];
        assert(g == NULL);
        g = new ExprTypeGuard(cfg_block, branch, ast_node, val, st);
    }
    // Appends a new entry guard for `cfg_block`, snapshotting `st`.
    void registerGuardForBlockEntry(CFGBlock* cfg_block, llvm::BranchInst* branch, const SymbolTable& st) {
        // printf("Adding guard for block %p, in %p\n", cfg_block, this);
        std::vector<BlockEntryGuard*>& v = block_begin_guards[cfg_block];
        v.push_back(new BlockEntryGuard(cfg_block, branch, st));
    }
    // Returns the entry guards for `block`, or a shared empty vector if none.
    const std::vector<BlockEntryGuard*>& getGuardsForBlock(CFGBlock* block) const {
        std::unordered_map<CFGBlock*, std::vector<BlockEntryGuard*>>::const_iterator it
            = block_begin_guards.find(block);
        if (it != block_begin_guards.end())
            return it->second;
        static std::vector<BlockEntryGuard*> empty_list;
        return empty_list;
    }
};
class IRGenerator {
private:
public:
......@@ -214,8 +123,7 @@ public:
class IREmitter;
IREmitter* createIREmitter(IRGenState* irstate, llvm::BasicBlock*& curblock, IRGenerator* irgenerator = NULL);
IRGenerator* createIRGenerator(IRGenState* irstate, std::unordered_map<CFGBlock*, llvm::BasicBlock*>& entry_blocks,
CFGBlock* myblock, TypeAnalysis* types, GuardList& out_guards,
const GuardList& in_guards, bool is_partial);
CFGBlock* myblock, TypeAnalysis* types);
CLFunction* wrapFunction(AST* node, AST_arguments* args, const std::vector<AST_stmt*>& body, SourceInfo* source);
}
......
......@@ -15,6 +15,7 @@
#ifndef PYSTON_CODEGEN_OSRENTRY_H
#define PYSTON_CODEGEN_OSRENTRY_H
#include <map>
#include <vector>
#include "core/stringpool.h"
......
......@@ -203,7 +203,11 @@ public:
llvm::Value* llvm_code, EffortLevel effort, const OSREntryDescriptor* entry_descriptor)
: clfunc(NULL), func(func), spec(spec), entry_descriptor(entry_descriptor), is_interpreted(is_interpreted),
code(code), llvm_code(llvm_code), effort(effort), times_called(0), times_speculation_failed(0),
location_map(nullptr) {}
location_map(nullptr) {
assert((spec != NULL) + (entry_descriptor != NULL) == 1);
}
ConcreteCompilerType* getReturnType();
// TODO this will need to be implemented eventually; things to delete:
// - line_table if it exists
......@@ -297,16 +301,17 @@ public:
void addVersion(CompiledFunction* compiled) {
assert(compiled);
assert(compiled->spec);
assert(compiled->spec->arg_types.size() == num_args + (takes_varargs ? 1 : 0) + (takes_kwargs ? 1 : 0));
assert((compiled->spec != NULL) + (compiled->entry_descriptor != NULL) == 1);
assert(compiled->clfunc == NULL);
assert(compiled->is_interpreted == (compiled->code == NULL));
assert(compiled->is_interpreted == (compiled->llvm_code == NULL));
compiled->clfunc = this;
if (compiled->entry_descriptor == NULL)
if (compiled->entry_descriptor == NULL) {
assert(compiled->spec->arg_types.size() == num_args + (takes_varargs ? 1 : 0) + (takes_kwargs ? 1 : 0));
versions.push_back(compiled);
else
} else {
osr_versions[compiled->entry_descriptor] = compiled;
}
}
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment