Commit 038ef9bb authored by Kirill Smelkov

.

parent 7a837040
@@ -65,13 +65,13 @@ cdef void on_sigbus(int sig, siginfo_t *si, void *_uc):
     # - dump py-level traceback and abort.
     # TODO turn SIGBUS into python-level exception? (see sigpanic in Go how to do).
-    writeerr("\nC: SIGBUS received; giving time to other threads" +
+    writeerr("\nC: SIGBUS received; giving time to other threads " +
              "to dump their exceptions (if any) ...\n")
     cdef PyGILState_STATE gstate = PyGILState_Ensure()
     PyGILState_Release(gstate)
     sleep(1)
-    writeerr("\nC: SIGBUS thread traceback:\n")
+    writeerr("\nC: SIGBUS'ed thread traceback:\n")
     PyGILState_Ensure()
     PyRun_SimpleString("import traceback; traceback.print_stack()")
     writeerr("-> SIGBUS\n");
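Editor's note on the TODO above: Go's sigpanic, which the comment points to, is exposed via runtime/debug.SetPanicOnFault, which turns a memory fault such as SIGBUS into a recoverable panic. Below is a minimal, self-contained Go sketch of that mechanism (illustration only, Linux-specific, not part of this patch); a SIGBUS-to-Python-exception bridge on the Cython side would mimic this behavior.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"syscall"
)

func main() {
	// Ask the runtime to turn an unexpected memory fault in this goroutine
	// into a panic (implementing runtime.Error) instead of a fatal crash.
	debug.SetPanicOnFault(true)

	f, err := os.CreateTemp("", "sigbus")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Map one page backed by the file, then truncate the file to zero:
	// touching the page now faults with SIGBUS (access beyond end of file).
	f.Truncate(4096)
	mem, err := syscall.Mmap(int(f.Fd()), 0, 4096, syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	f.Truncate(0)

	func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Println("recovered from fault:", r)
			}
		}()
		fmt.Println(mem[0]) // SIGBUS -> sigpanic -> recoverable panic
	}()
}
```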
@@ -162,7 +162,7 @@
 // file but with different <at>. This could be achieved via watching with
 // @<at_min>, and then deciding internally which views needs to be adjusted and
 // which views need not. Wcfs does not oblige clients to do so though, and a
-// client is free to use as many head/watch openenings as it needs to.
+// client is free to use as many head/watch openings as it needs to.
 //
 // When clients are done with @<revX>/bigfile/<bigfileX> (i.e. client's
 // transaction ends and array is unmapped), the server sees number of opened
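To illustrate the scheme described in this hunk (illustration only; Client and View are hypothetical types, not wcfs's actual client code): one head/watch opening registered @<at_min> can serve several views, with the client deciding per pin notification which views actually need adjustment.

```go
// Hypothetical client-side types; only the dispatch idea mirrors the text.
type View struct {
	at zodb.Tid // this view's snapshot revision
}

func (v *View) remap(blk int64, rev zodb.Tid) {
	// readjust this view's mapping of block #blk to data as of @rev ...
}

type Client struct {
	views []*View // all views served by one watch @at_min = min(v.at)
}

// onPin handles a notification that block #blk changed at revision rev:
// only views whose snapshot predates the change must be adjusted.
func (c *Client) onPin(blk int64, rev zodb.Tid) {
	for _, v := range c.views {
		if v.at < rev {
			v.remap(blk, rev)
		}
	}
}
```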
@@ -1134,7 +1134,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
     }
     // we have the data - it can be used after watchers are updated
-    // XXX should we use ctx here? (see updateWatcher comments)
+    // XXX should we use ctx here? (see updateWatchers comments)
     f.updateWatchers(ctx, blk, treepath, zblk, blkrevMax)
     // data can be used now
@@ -1169,7 +1169,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
 //
 // Called with f.head.zconnMu rlocked.
 //
-// XXX do we really need to use/propagate caller contex here? ideally update
+// XXX do we really need to use/propagate caller context here? ideally update
 // watchers should be synchronous, and in practice we just use 30s timeout.
 // Should a READ interrupt cause watch update failure?
 func (f *BigFile) updateWatchers(ctx context.Context, blk int64, treepath []btree.LONode, zblk zBlk, blkrevMax zodb.Tid) {
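One hedged reading of the XXX above, as a sketch (hypothetical wrapper, not from the patch; assumes context and time are imported by the surrounding file): keep the update synchronous but bound it by its own 30s deadline, detached from the reader's ctx, so an interrupted READ cannot fail a half-done watcher update.

```go
func (f *BigFile) updateWatchersSync(blk int64, treepath []btree.LONode, zblk zBlk, blkrevMax zodb.Tid) {
	// own deadline instead of the caller's ctx: a READ interrupt no
	// longer propagates into the watcher update.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	f.updateWatchers(ctx, blk, treepath, zblk, blkrevMax)
}
```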
@@ -1434,9 +1434,11 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
     // blocks. If we don't, we can miss to send pin for a freshly read
     // block which could have revision > w.at . XXX test
     // XXX locking
-    // XXX register only if watch was created anew, not updated.
+    // XXX register only if watch was created anew, not updated?
     f.watches[w] = struct{}{}
+    wlink.byfile[foid] = w
+    // XXX defer -> unregister watch if error?
     // pin all tracked file blocks that were changed in (at, head] range.
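The added `XXX defer -> unregister watch if error?` could look like the following sketch (an assumption, not the patch's code; presumes setupWatch has a named err return, as readBlk does): register both sides together and roll the registration back if setup later fails.

```go
f.watches[w] = struct{}{}
wlink.byfile[foid] = w
defer func() {
	if err != nil {
		// setup failed after registration - undo both together
		delete(f.watches, w)
		delete(wlink.byfile, foid)
	}
}()
```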
@@ -1484,8 +1486,8 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
         return err
     }
     // XXX locking, place = ok? or move closer to at==zodb.InvalidTid ^^^?
-    wlink.byfile[foid] = w
-    // XXX or register w to f & wlink here?
+    // NOTE registering f.watches[w] and wlink.byfile[foid] = w must come together.
     return nil
 }
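One way to enforce the NOTE's invariant that `f.watches[w]` and `wlink.byfile[foid] = w` always come together is to keep both registrations behind a single helper (hypothetical, including the *Watch type name); a matching unregisterWatch would then keep teardown symmetric with the defer sketch above.

```go
func registerWatch(f *BigFile, wlink *WatchLink, foid zodb.Oid, w *Watch) {
	f.watches[w] = struct{}{} // file side
	wlink.byfile[foid] = w    // link side
}
```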
@@ -1197,6 +1197,10 @@ def test_wcfs():
     wg.go(_)
     wg.wait()
     wl.close()
+    # NOTE if wcfs.go does not fully cleanup this canceled watch and leave it
+    # in half-working state, it will break on further commit, as pin to the
+    # watch won't be handled.
+    # TODO -> add explicit check for ^^^ if/when moved to separate test.
     # invalid requests -> wcfs replies error
     wl = t.openwatch()
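The NOTE added to the test relies on wcfs fully cleaning up a canceled watch. On the server side that cleanup could look roughly like this sketch (names hypothetical except WatchLink, byfile, and watches; assumes a backlink from a watch to its file):

```go
func (wlink *WatchLink) clearWatches() {
	for foid, w := range wlink.byfile {
		delete(w.file.watches, w) // assumed Watch -> BigFile backlink
		delete(wlink.byfile, foid)
	}
	// with everything unregistered, a later commit won't try to pin
	// through this dead link.
}
```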