Commit 612c89f0 authored by Chris Toshok

undo the threading changes

parent c0ffc375
......@@ -51,29 +51,103 @@ PthreadFastMutex threading_lock;
// be checked while the threading_lock is held; might not be worth it.
int num_starting_threads(0);
ThreadStateInternal::ThreadStateInternal(void* stack_start, pthread_t pthread_id, PyThreadState* public_thread_state)
: saved(false), stack_start(stack_start), pthread_id(pthread_id), public_thread_state(public_thread_state) {
}
class ThreadStateInternal {
private:
bool saved;
ucontext_t ucontext;
public:
void* stack_start;
void ThreadStateInternal::accept(gc::GCVisitor* v) {
auto pub_state = public_thread_state;
v->visitIf(pub_state->curexc_type);
v->visitIf(pub_state->curexc_value);
v->visitIf(pub_state->curexc_traceback);
v->visitIf(pub_state->dict);
struct StackInfo {
BoxedGenerator* next_generator;
void* stack_start;
void* stack_limit;
for (auto& stack_info : previous_stacks) {
v->visit(stack_info.next_generator);
StackInfo(BoxedGenerator* next_generator, void* stack_start, void* stack_limit)
: next_generator(next_generator), stack_start(stack_start), stack_limit(stack_limit) {
#if STACK_GROWS_DOWN
v->visitPotentialRange((void**)stack_info.stack_limit, (void**)stack_info.stack_start);
assert(stack_start > stack_limit);
assert((char*)stack_start - (char*)stack_limit < (1L << 30));
#else
v->visitPotentialRange((void**)stack_info.stack_start, (void**)stack_info.stack_limit);
assert(stack_start < stack_limit);
assert((char*)stack_limit - (char*)stack_start < (1L << 30));
#endif
}
};
std::vector<StackInfo> previous_stacks;
pthread_t pthread_id;
PyThreadState* public_thread_state;
ThreadStateInternal(void* stack_start, pthread_t pthread_id, PyThreadState* public_thread_state)
: saved(false), stack_start(stack_start), pthread_id(pthread_id), public_thread_state(public_thread_state) {}
// Capture this thread's register/execution context via getcontext() before the
// thread blocks (called from beginAllowThreads), so the GC can later scan the
// saved state instead of interrupting the thread.
void saveCurrent() {
// Contexts don't nest: a second save without popCurrent() is a bug.
assert(!saved);
getcontext(&ucontext);
saved = true;
}
// Discard the saved context when the thread resumes running Python code
// (called from endAllowThreads). Must pair with a prior saveCurrent().
void popCurrent() {
assert(saved);
saved = false;
}
}
// True while a context saved by saveCurrent() is available for this thread.
bool isValid() { return saved; }
// The register/stack context captured by the most recent saveCurrent().
ucontext_t* getContext() { return &ucontext; }
// Switching onto a generator's stack: remember the current stack segment and
// where execution stopped on it (old_stack_limit), so the GC can still scan
// it, then make the generator's stack the active one.
void pushGenerator(BoxedGenerator* g, void* new_stack_start, void* old_stack_limit) {
previous_stacks.emplace_back(g, this->stack_start, old_stack_limit);
this->stack_start = new_stack_start;
}
// Returning from a generator: restore the previously-active stack segment.
void popGenerator() {
assert(previous_stacks.size());
StackInfo& stack = previous_stacks.back();
stack_start = stack.stack_start;
previous_stacks.pop_back();
}
// Sanity check at thread shutdown: no suspended generator stacks may remain.
void assertNoGenerators() { assert(previous_stacks.size() == 0); }
// GC root-scanning hook: report every GC-visible object this thread holds.
void accept(gc::GCVisitor* v) {
auto pub_state = public_thread_state;
// The public PyThreadState owns the per-thread exception triple and dict;
// each may be null, and visit() requires a non-null pointer.
if (pub_state->curexc_type)
v->visit(pub_state->curexc_type);
if (pub_state->curexc_value)
v->visit(pub_state->curexc_value);
if (pub_state->curexc_traceback)
v->visit(pub_state->curexc_traceback);
if (pub_state->dict)
v->visit(pub_state->dict);
// Also scan every stack segment this thread suspended when switching into
// generators: the generator object itself precisely, and the old stack
// range conservatively (any word could be a pointer).
for (auto& stack_info : previous_stacks) {
v->visit(stack_info.next_generator);
#if STACK_GROWS_DOWN
v->visitPotentialRange((void**)stack_info.stack_limit, (void**)stack_info.stack_start);
#else
v->visitPotentialRange((void**)stack_info.stack_start, (void**)stack_info.stack_limit);
#endif
}
}
};
static std::unordered_map<pthread_t, ThreadStateInternal*> current_threads;
__thread ThreadStateInternal* ThreadStateInternal::current = 0;
static __thread ThreadStateInternal* current_internal_thread_state = 0;
// Free-function entry point (used by generator.cpp) that forwards to the
// calling thread's ThreadStateInternal::pushGenerator.
void pushGenerator(BoxedGenerator* g, void* new_stack_start, void* old_stack_limit) {
assert(new_stack_start);
assert(old_stack_limit);
// Threads register their state in _thread_start/registerMainThread first.
assert(current_internal_thread_state);
current_internal_thread_state->pushGenerator(g, new_stack_start, old_stack_limit);
}
// Free-function entry point (used by generator.cpp) that forwards to the
// calling thread's ThreadStateInternal::popGenerator.
void popGenerator() {
assert(current_internal_thread_state);
current_internal_thread_state->popGenerator();
}
// These are guarded by threading_lock
static int signals_waiting(0);
......@@ -111,10 +185,10 @@ static void visitLocalStack(gc::GCVisitor* v) {
assert(sizeof(registers) % 8 == 0);
v->visitPotentialRange((void**)&registers, (void**)((&registers) + 1));
assert(ThreadStateInternal::current);
assert(current_internal_thread_state);
#if STACK_GROWS_DOWN
void* stack_low = getCurrentStackLimit();
void* stack_high = ThreadStateInternal::current->stack_start;
void* stack_high = current_internal_thread_state->stack_start;
#else
void* stack_low = current_thread_state->stack_start;
void* stack_high = getCurrentStackLimit();
......@@ -123,7 +197,7 @@ static void visitLocalStack(gc::GCVisitor* v) {
assert(stack_low < stack_high);
v->visitPotentialRange((void**)stack_low, (void**)stack_high);
ThreadStateInternal::current->accept(v);
current_internal_thread_state->accept(v);
}
void visitAllStacks(gc::GCVisitor* v) {
......@@ -198,7 +272,7 @@ static void _thread_context_dump(int signum, siginfo_t* info, void* _context) {
printf("old rip: 0x%lx\n", (intptr_t)context->uc_mcontext.gregs[REG_RIP]);
}
assert(ThreadStateInternal::current == current_threads[tid]);
assert(current_internal_thread_state == current_threads[tid]);
pushThreadState(current_threads[tid], context);
signals_waiting--;
}
......@@ -238,8 +312,8 @@ static void* _thread_start(void* _arg) {
#else
void* stack_bottom = stack_start;
#endif
ThreadStateInternal::current = new ThreadStateInternal(stack_bottom, current_thread, &cur_thread_state);
current_threads[current_thread] = ThreadStateInternal::current;
current_internal_thread_state = new ThreadStateInternal(stack_bottom, current_thread, &cur_thread_state);
current_threads[current_thread] = current_internal_thread_state;
num_starting_threads--;
......@@ -251,7 +325,7 @@ static void* _thread_start(void* _arg) {
assert(!PyErr_Occurred());
void* rtn = start_func(arg1, arg2, arg3);
ThreadStateInternal::current->assertNoGenerators();
current_internal_thread_state->assertNoGenerators();
{
LOCK_REGION(&threading_lock);
......@@ -260,7 +334,7 @@ static void* _thread_start(void* _arg) {
if (VERBOSITY() >= 2)
printf("thread tid=%ld exited\n", current_thread);
}
ThreadStateInternal::current = 0;
current_internal_thread_state = 0;
return rtn;
}
......@@ -338,9 +412,9 @@ static void* find_stack() {
void registerMainThread() {
LOCK_REGION(&threading_lock);
assert(!ThreadStateInternal::current);
ThreadStateInternal::current = new ThreadStateInternal(find_stack(), pthread_self(), &cur_thread_state);
current_threads[pthread_self()] = ThreadStateInternal::current;
assert(!current_internal_thread_state);
current_internal_thread_state = new ThreadStateInternal(find_stack(), pthread_self(), &cur_thread_state);
current_threads[pthread_self()] = current_internal_thread_state;
struct sigaction act;
memset(&act, 0, sizeof(act));
......@@ -356,8 +430,8 @@ void registerMainThread() {
}
void finishMainThread() {
assert(ThreadStateInternal::current);
ThreadStateInternal::current->assertNoGenerators();
assert(current_internal_thread_state);
current_internal_thread_state->assertNoGenerators();
// TODO maybe this is the place to wait for non-daemon threads?
}
......@@ -377,8 +451,8 @@ extern "C" void beginAllowThreads() noexcept {
{
LOCK_REGION(&threading_lock);
assert(ThreadStateInternal::current);
ThreadStateInternal::current->saveCurrent();
assert(current_internal_thread_state);
current_internal_thread_state->saveCurrent();
}
}
......@@ -386,8 +460,8 @@ extern "C" void endAllowThreads() noexcept {
{
LOCK_REGION(&threading_lock);
assert(ThreadStateInternal::current);
ThreadStateInternal::current->popCurrent();
assert(current_internal_thread_state);
current_internal_thread_state->popCurrent();
}
......
......@@ -22,7 +22,6 @@
#include "core/common.h"
#include "core/thread_utils.h"
#include "core/types.h"
namespace pyston {
class Box;
......@@ -34,81 +33,6 @@ class GCVisitor;
namespace threading {
class ThreadStateInternal {
private:
bool saved;
ucontext_t ucontext;
void _popGenerator() {
assert(previous_stacks.size());
StackInfo& stack = previous_stacks.back();
stack_start = stack.stack_start;
previous_stacks.pop_back();
}
void _pushGenerator(BoxedGenerator* g, void* new_stack_start, void* old_stack_limit) {
previous_stacks.emplace_back(g, this->stack_start, old_stack_limit);
this->stack_start = new_stack_start;
}
public:
static __thread ThreadStateInternal* current;
void* stack_start;
struct StackInfo {
BoxedGenerator* next_generator;
void* stack_start;
void* stack_limit;
StackInfo(BoxedGenerator* next_generator, void* stack_start, void* stack_limit)
: next_generator(next_generator), stack_start(stack_start), stack_limit(stack_limit) {
#if STACK_GROWS_DOWN
assert(stack_start > stack_limit);
assert((char*)stack_start - (char*)stack_limit < (1L << 30));
#else
assert(stack_start < stack_limit);
assert((char*)stack_limit - (char*)stack_start < (1L << 30));
#endif
}
};
std::vector<StackInfo> previous_stacks;
pthread_t pthread_id;
PyThreadState* public_thread_state;
ThreadStateInternal(void* stack_start, pthread_t pthread_id, PyThreadState* public_thread_state);
void saveCurrent() {
assert(!saved);
getcontext(&ucontext);
saved = true;
}
void popCurrent() {
assert(saved);
saved = false;
}
bool isValid() const { return saved; }
ucontext_t* getContext() { return &ucontext; }
void assertNoGenerators() { assert(previous_stacks.size() == 0); }
void accept(gc::GCVisitor* v);
// Some hooks to keep track of the list of stacks that this thread has been using.
// Every time we switch to a new generator, we need to pass a reference to the generator
// itself (so we can access the registers it is saving), the location of the new stack, and
// where we stopped executing on the old stack.
inline static void pushGenerator(BoxedGenerator* g, void* new_stack_start, void* old_stack_limit) {
assert(new_stack_start);
assert(old_stack_limit);
assert(ThreadStateInternal::current);
current->_pushGenerator(g, new_stack_start, old_stack_limit);
}
inline static void popGenerator() {
assert(ThreadStateInternal::current);
current->_popGenerator();
}
};
// Whether or not a second thread was ever started:
bool threadWasStarted();
......@@ -123,6 +47,14 @@ void finishMainThread();
// stacks and thread-local PyThreadState objects
void visitAllStacks(gc::GCVisitor* v);
// Some hooks to keep track of the list of stacks that this thread has been using.
// Every time we switch to a new generator, we need to pass a reference to the generator
// itself (so we can access the registers it is saving), the location of the new stack, and
// where we stopped executing on the old stack.
void pushGenerator(BoxedGenerator* g, void* new_stack_start, void* old_stack_limit);
void popGenerator();
#ifndef THREADING_USE_GIL
#define THREADING_USE_GIL 1
#define THREADING_USE_GRWL 0
......
......@@ -90,7 +90,7 @@ void generatorEntry(BoxedGenerator* g) {
assert(g->cls == generator_cls);
assert(g->function->cls == function_cls);
threading::ThreadStateInternal::pushGenerator(g, g->stack_begin, g->returnContext);
threading::pushGenerator(g, g->stack_begin, g->returnContext);
try {
RegisterHelper context_registerer(g, __builtin_frame_address(0));
......@@ -107,7 +107,7 @@ void generatorEntry(BoxedGenerator* g) {
// we returned from the body of the generator. next/send/throw will notify the caller
g->entryExited = true;
threading::ThreadStateInternal::popGenerator();
threading::popGenerator();
}
swapContext(&g->context, g->returnContext, 0);
}
......@@ -264,9 +264,9 @@ extern "C" Box* yield(BoxedGenerator* obj, Box* value) {
BoxedGenerator* self = static_cast<BoxedGenerator*>(obj);
self->returnValue = value;
threading::ThreadStateInternal::popGenerator();
threading::popGenerator();
swapContext(&self->context, self->returnContext, 0);
threading::ThreadStateInternal::pushGenerator(obj, obj->stack_begin, obj->returnContext);
threading::pushGenerator(obj, obj->stack_begin, obj->returnContext);
// if the generator receives a exception from the caller we have to throw it
if (self->exception.type) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment