Commit cd6d0b71 authored by Kirill Smelkov

X disable OS cache retrieve for now as well; Add tracing of substeps in handleδZ

parent f2ddf36f
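The commit does two things: it disables the OS-cache retrieve/store path in invalidateBlk (the `if false` hunks below), and it brackets every substep of handleδZ with verbosity-gated ">>>"/"<<<" trace lines. Below is a minimal standalone sketch of that tracing pattern, assuming the golang/glog package (suggested by the `log.V(2)`/`log.Infof` idiom in the diff); the `traceSubstep` helper and its `main` are illustrative only, not wcfs code.

```go
package main

import (
	"flag"

	log "github.com/golang/glog" // assumed logging package; the diff uses the same log.V / log.Infof idiom
)

// traceSubstep logs "\t >>> name" before and "\t <<< name" after running step,
// but only when verbosity level 2 or higher is requested.
// (Illustrative helper, not code from wcfs.)
func traceSubstep(name string, step func() error) error {
	if log.V(2) {
		log.Infof("\t >>> %s", name)
		defer log.Infof("\t <<< %s", name) // emitted after step returns
	}
	return step()
}

func main() {
	flag.Parse() // glog picks up -v and -logtostderr from the flag set
	defer log.Flush()

	// run with:  go run . -logtostderr -v=2   to see the trace
	_ = traceSubstep("invalidate OS data cache", func() error {
		// ... the work of the substep would go here ...
		return nil
	})
}
```

Because `log.V(2)` evaluates to false when verbosity is below 2, the `Infof` calls are skipped entirely on the default path, which is why such tracing can stay permanently in handleδZ.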
@@ -937,6 +937,9 @@ retry:
 	defer cancel()
 	// invalidate kernel cache for file data
+	if log.V(2) {
+		log.Infof("\t >>> invalidate OS data cache")
+	}
 	wg := xsync.NewWorkGroup(ctx)
 	for foid, δfile := range δF.ByFile {
 		// file was requested to be tracked -> it must be present in fileTab
@@ -964,11 +967,17 @@ retry:
 	if err != nil {
 		return err
 	}
+	if log.V(2) {
+		log.Infof("\t <<< invalidate OS data cache")
+	}
 	// invalidate kernel cache for attributes
 	// we need to do it only if we see topology (i.e. btree) change
 	//
 	// do it after completing data invalidations.
+	if log.V(2) {
+		log.Infof("\t >>> invalidate OS attr cache")
+	}
 	wg = xsync.NewWorkGroup(ctx)
 	for foid, δfile := range δF.ByFile {
 		if !δfile.Size {
@@ -983,8 +992,14 @@ retry:
 	if err != nil {
 		return err
 	}
+	if log.V(2) {
+		log.Infof("\t <<< invalidate OS attr cache")
+	}
 	// resync .zhead to δZ.tid
+	if log.V(2) {
+		log.Infof("\t >>> resync zhead to δZ.tid")
+	}
 	// 1. abort old and resync to new txn/at
 	transaction.Current(zhead.TxnCtx).Abort()
@@ -1019,8 +1034,14 @@ retry:
 		// that's why revision is only approximated
 		file.revApprox = zhead.At()
 	}
+	if log.V(2) {
+		log.Infof("\t <<< resync zhead to δZ.tid")
+	}
 	// notify .wcfs/zhead
+	if log.V(2) {
+		log.Infof("\t >>> notify .wcfs/zhead")
+	}
 	gdebug.zheadSockTabMu.Lock()
 	for sk := range gdebug.zheadSockTab {
 		_, err := fmt.Fprintf(xio.BindCtxW(sk, ctx), "%s\n", δZ.Tid)
@@ -1031,12 +1052,18 @@ retry:
 		}
 	}
 	gdebug.zheadSockTabMu.Unlock()
+	if log.V(2) {
+		log.Infof("\t <<< notify .wcfs/zhead")
+	}
 	// shrink δFtail not to grow indefinitely.
 	// cover history for at least 1 minute, but including all watches.
 	//
 	// TODO shrink δFtail only once in a while - there is no need to compute
 	// revCut and cut δFtail on every transaction.
+	if log.V(2) {
+		log.Infof("\t >>> shrink δFtail")
+	}
 	revCut := zodb.TidFromTime(zhead.At().Time().Add(-1*time.Minute))
 	head.wlinkMu.Lock()
 	for wlink := range head.wlinkTab {
@@ -1048,8 +1075,14 @@ retry:
 	}
 	head.wlinkMu.Unlock()
 	bfdir.δFtail.ForgetPast(revCut)
+	if log.V(2) {
+		log.Infof("\t <<< shrink δFtail")
+	}
 	// notify zhead.At waiters
+	if log.V(2) {
+		log.Infof("\t notify zhead.At waiters")
+	}
 	for hw := range head.hwait {
 		if hw.at <= δZ.Tid {
 			delete(head.hwait, hw)
@@ -1057,6 +1090,9 @@ retry:
 		}
 	}
+	if log.V(2) {
+		log.Infof("\t OK")
+	}
 	return nil
 }
@@ -1108,6 +1144,10 @@ func (head *Head) zheadWait(ctx context.Context, at zodb.Tid) (err error) {
 // called with zheadMu wlocked.
 func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
 	defer xerr.Contextf(&err, "%s: invalidate blk #%d:", f.path(), blk)
+	if log.V(2) {
+		log.Infof("%s: >>> invalidate blk #%d", f.path(), blk)
+		defer log.Infof("%s: <<< invalidate blk #%d", f.path(), blk)
+	}
 	fsconn := gfsconn
 	blksize := f.blksize
@@ -1129,7 +1169,9 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
 	// TODO skip retrieve/store if len(f.watchTab) == 0
 	// try to retrieve cache of current head/data[blk], if we got nothing from f.loading
-	if blkdata == nil {
+	// XXX temp disabled to avoid potential store_notify vs read deadlock on locked page
+	// if blkdata == nil {
+	if false {
 		blkdata = make([]byte, blksize)
 		n, st := fsconn.FileRetrieveCache(f.Inode(), off, blkdata)
 		if st != fuse.OK {
@@ -1144,7 +1186,23 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
 	// system overloaded.
 	//
 	// if we have the data - preserve it under @revX/bigfile/file[blk].
-	if int64(len(blkdata)) == blksize {
+	// XXX temporarily disabled due to deadlock in between lookup, forget and handleδZ
+	//
+	// T1 lookup "head/bigfile/x"            T3 Forget any
+	//
+	//    lookupLock.RLock <--------------------
+	//    zheadMu.RLock -------                |
+	//                        | lookupLock.WLock
+	//                        |                ^
+	//                        |                |
+	//                        |                |
+	// T2 zwatcher->handleδZ  |                |
+	//                        |                |
+	//    zheadMu.WLock <------                |
+	//    lookupLock.RLock ---------------------
+	// if int64(len(blkdata)) == blksize {
+	if false {
 		err := func() error {
 			// store retrieved data back to OS cache for file @<rev>/file[blk]
 			δFtail := f.head.bfdir.δFtail
@@ -1185,6 +1243,10 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
 // called with zheadMu wlocked.
 func (f *BigFile) invalidateAttr() (err error) {
 	defer xerr.Contextf(&err, "%s: invalidate attr", f.path())
+	if log.V(2) {
+		log.Infof("%s: >>> invalidateAttr", f.path())
+		defer log.Infof("%s: <<< invalidateAttr", f.path())
+	}
 	fsconn := gfsconn
 	st := fsconn.FileNotify(f.Inode(), -1, -1) // metadata only
 	if st != fuse.OK {
...
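The large disabled block in invalidateBlk is motivated by the lock-order cycle drawn in its comment. The sketch below is a standalone toy, with placeholder goroutine bodies and the lock names (`zheadMu`, `lookupLock`) taken from that comment, showing why the cycle wedges: with Go's `sync.RWMutex`, a goroutine blocked in `Lock()` also bars later `RLock()` callers, so T1 (lookup), T2 (handleδZ) and T3 (Forget) end up waiting on one another.

```go
package main

// Toy reproduction of the lookup / handleδZ / Forget cycle described in the
// disabled block's comment.  zheadMu and lookupLock mirror the names used
// there; the goroutine bodies are placeholders, not wcfs code.
//
//   T1 (lookup)   holds lookupLock.RLock, then wants zheadMu.RLock
//   T2 (handleδZ) holds zheadMu.Lock,     then wants lookupLock.RLock
//   T3 (Forget)   wants lookupLock.Lock   (blocked by T1's read lock)
//
// T2's RLock parks behind T3's pending Lock (sync.RWMutex bars new readers
// once a writer is waiting), which closes the T1 -> T2 -> T3 -> T1 cycle.

import (
	"fmt"
	"sync"
	"time"
)

var (
	zheadMu    sync.RWMutex
	lookupLock sync.RWMutex
)

func main() {
	t1holds := make(chan struct{}) // closed once T1 holds lookupLock.RLock
	t2holds := make(chan struct{}) // closed once T2 holds zheadMu.Lock

	var wg sync.WaitGroup
	wg.Add(3)

	go func() { // T1: lookup "head/bigfile/x"
		defer wg.Done()
		lookupLock.RLock()
		close(t1holds)
		<-t2holds
		time.Sleep(100 * time.Millisecond) // let T3 queue its write lock
		zheadMu.RLock()                    // blocks: T2 holds zheadMu for writing
		zheadMu.RUnlock()
		lookupLock.RUnlock()
	}()

	go func() { // T2: zwatcher -> handleδZ
		defer wg.Done()
		<-t1holds
		zheadMu.Lock()
		close(t2holds)
		time.Sleep(100 * time.Millisecond) // let T3 block inside lookupLock.Lock()
		lookupLock.RLock()                 // blocks: T3's pending Lock bars new readers
		lookupLock.RUnlock()
		zheadMu.Unlock()
	}()

	go func() { // T3: Forget
		defer wg.Done()
		<-t1holds
		<-t2holds
		lookupLock.Lock() // blocks: T1 still holds the read lock
		lookupLock.Unlock()
	}()

	done := make(chan struct{})
	go func() { wg.Wait(); close(done) }()

	select {
	case <-done:
		fmt.Println("completed (the race did not line up this run)")
	case <-time.After(1 * time.Second):
		fmt.Println("T1, T2 and T3 are all blocked: lock-order deadlock reproduced")
	}
}
```

This is the deadlock that motivates switching the `if int64(len(blkdata)) == blksize` branch (and, per the commit title, the cache retrieve as well) to `if false` until the lock ordering between lookup/Forget and the zwatcher is reworked.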