Commit 9f829258 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent aae0714e
......@@ -117,7 +117,7 @@
//
// XXX locking -> explain atMu + slaves and refer to "Locking" in wcfs.go
//
// Conn.atMu > Conn.filehMu > FileH.mu
// Conn.atMu > Conn.filehMu > FileH.mmapMu
//
// Several locks are RWMutex instead of just Mutex not only to allow more
// concurrency, but, in the first place for correctness: pinner thread being
......@@ -150,7 +150,7 @@
//
// - wconn.atMu.R
// - wconn.filehMu.R
// - fileh.mu (R:.mmaps W:.pinned)
// - fileh.mmapMu (R:.mmaps W:.pinned)
//
// - virt_lock
//
......@@ -442,12 +442,12 @@ error _Conn::__pin1(PinReq *req) {
// Mapping.remmap_blk and Mapping.unmap under virt_lock locked. In those
// functions the order of locks is
//
// virt_lock, wconn.atMu.R, fileh.mu
// virt_lock, wconn.atMu.R, fileh.mmapMu
//
// So if we take virt_lock right around mmap._remmapblk(), the order of
// locks in pinner would be
//
// wconn.atMu.R, wconn.filehMu.R, fileh.mu, virt_lock
// wconn.atMu.R, wconn.filehMu.R, fileh.mmapMu, virt_lock
//
// which means there is AB-BA deadlock possibility.
//
......@@ -489,9 +489,9 @@ error _Conn::__pin1(PinReq *req) {
// our view to requested @at, and "closing" - due to other clients
// accessing wcfs/head/f simultaneously.
f->_mu.lock();
f->_mmapMu.lock();
defer([&]() {
f->_mu.unlock();
f->_mmapMu.unlock();
});
for (auto mmap : f->_mmaps) { // TODO use ↑blk_start for binary search
......@@ -635,7 +635,7 @@ error _Conn::resync(zodb::Tid at) {
return E(fmt::errorf("wcfs bug: head/file size %% blksize != 0"));
// replace zero regions in f mappings in accordance to adjusted f._headfsize.
// NOTE it is ok to access f._mmaps without locking f._mu because we hold wconn.atMu.W
// NOTE it is ok to access f._mmaps without locking f._mmapMu because we hold wconn.atMu.W
for (auto mmap : f->_mmaps) {
//printf(" resync -> %s: unzero [%lu:%lu)", v(at), f->_headfsize/f->blksize, headfsize/f->blksize);
uint8_t *mem_unzero_start = min(mmap->mem_stop,
......@@ -789,7 +789,7 @@ retry:
});
// do the actual open.
// we hold only wconn.atMu.R, but neither wconn.filehMu, nor f.mu .
// we hold only wconn.atMu.R, but neither wconn.filehMu, nor f.mmapMu .
f->_openErr = f->_open();
if (f->_openErr != nil)
return make_pair(nil, E(f->_openErr));
......@@ -815,7 +815,7 @@ retry:
// Called with:
// - wconn.atMu held
// - wconn.filehMu not locked
// - f.mu not locked
// - f.mmapMu not locked
error _FileH::_open() {
_FileH* f = this;
Conn wconn = f->wconn;
......@@ -843,7 +843,7 @@ error _FileH::_open() {
v(f->_headf->name()), f->_headfsize, f->blksize);
// start watching f
// NOTE we are _not_ holding wconn.filehMu nor f.mu - only wconn.atMu to rely on wconn.at being stable.
// NOTE we are _not_ holding wconn.filehMu nor f.mmapMu - only wconn.atMu to rely on wconn.at being stable.
// NOTE wcfs will reply "ok" only after wcfs/head/at ≥ wconn.at
string ack;
tie(ack, err) = wconn->_wlink->sendReq(context::background(),
......@@ -941,9 +941,9 @@ error _FileH::_closeLocked(bool force) {
reterr1(fileh._headf->close());
// change all fileh.mmaps to cause EFAULT on any access after fileh.close
fileh._mu.lock();
fileh._mmapMu.lock();
defer([&]() {
fileh._mu.unlock();
fileh._mmapMu.unlock();
});
for (auto mmap : fileh._mmaps) {
......@@ -969,9 +969,9 @@ pair<Mapping, error> _FileH::mmap(int64_t blk_start, int64_t blk_len, VMA *vma)
// NOTE virtmem lock is held by virtmem caller
// XXX locking ok?
f.wconn->_atMu.RLock(); // e.g. f._headfsize
f._mu.lock(); // f._pinned, f._mmaps
f._mmapMu.lock(); // f._pinned, f._mmaps
defer([&]() {
f._mu.unlock();
f._mmapMu.unlock();
f.wconn->_atMu.RUnlock();
});
......@@ -1066,9 +1066,9 @@ error _Mapping::unmap() {
// NOTE virtmem lock is held by virtmem caller
f->wconn->_atMu.RLock();
f->_mu.lock(); // f._mmaps
f->_mmapMu.lock();
defer([&]() {
f->_mu.unlock();
f->_mmapMu.unlock();
f->wconn->_atMu.RUnlock();
});
......@@ -1091,7 +1091,7 @@ error _Mapping::unmap() {
// XXX clear other fields?
// XXX do it first? (to avoid pinner going through f.mmaps and hitting unmapped memory)
// -> no need: both pinner and unmap lock on f.mu
// -> no need: both pinner and unmap lock on f.mmapMu
//f->_mmaps.remove(mmap);
f->_mmaps.erase(
std::remove(f->_mmaps.begin(), f->_mmaps.end(), mmap),
......@@ -1107,7 +1107,7 @@ error _Mapping::unmap() {
//
// The following locks must be held by caller:
// - f.wconn.atMu
// - f._mu XXX not needed? (f._mmaps and f._pinned are not used)
// - f._mmapMu XXX not needed? (f._mmaps and f._pinned are not used)
error _Mapping::_remmapblk(int64_t blk, zodb::Tid at) {
// XXX must not be called after Mapping is switched to efault?
......@@ -1172,9 +1172,9 @@ error _Mapping::remmap_blk(int64_t blk) {
// NOTE virtmem lock is held by virtmem caller
f->wconn->_atMu.RLock();
f->_mu.lock();
f->_mmapMu.lock();
defer([&]() {
f->_mu.unlock();
f->_mmapMu.unlock();
f->wconn->_atMu.RUnlock();
});
......
......@@ -246,7 +246,7 @@ struct _FileH : object {
// protected by .wconn._atMu
off_t _headfsize;
sync::Mutex _mu; // atMu.W | atMu.R + _mu XXX -> mmapMu ?
sync::Mutex _mmapMu; // atMu.W | atMu.R + _mmapMu
dict<int64_t, zodb::Tid> _pinned; // {} blk -> rev that wcfs already sent us for this file
vector<Mapping> _mmaps; // []Mapping ↑blk_start mappings of this file
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment