Commit 18484eb9 authored by Greg Thelen, committed by Greg Kroah-Hartman

writeback: safer lock nesting

commit 2e898e4c upstream.

lock_page_memcg()/unlock_page_memcg() use spin_lock_irqsave/restore() if
the page's memcg is undergoing move accounting, which occurs when a
process leaves its memcg for a new one that has
memory.move_charge_at_immigrate set.

unlocked_inode_to_wb_begin,end() use spin_lock_irq/spin_unlock_irq() if
the given inode is switching writeback domains.  Switches occur when
enough writes are issued from a new domain.

This existing pattern is thus suspicious:
    lock_page_memcg(page);
    unlocked_inode_to_wb_begin(inode, &locked);
    ...
    unlocked_inode_to_wb_end(inode, locked);
    unlock_page_memcg(page);

If an inode switch and a process memcg migration are both in-flight, then
unlocked_inode_to_wb_end() will unconditionally enable interrupts while
still holding the lock_page_memcg() irq spinlock.  This suggests the
possibility of deadlock if an interrupt occurs before unlock_page_memcg().
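
As an illustration, here is the same pattern annotated with the IRQ state at
each step, assuming the memcg move and the inode switch are both in flight
(a simplified sketch, not an exact call site):

    lock_page_memcg(page);                      /* spin_lock_irqsave(): IRQs off */
    unlocked_inode_to_wb_begin(inode, &locked); /* spin_lock_irq(): IRQs off */
    ...
    unlocked_inode_to_wb_end(inode, locked);    /* spin_unlock_irq(): IRQs back on */
    /* window: an interrupt taken here can reenter lock_page_memcg() */
    unlock_page_memcg(page);                    /* spin_unlock_irqrestore() */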

    truncate
    __cancel_dirty_page
    lock_page_memcg
    unlocked_inode_to_wb_begin
    unlocked_inode_to_wb_end
    <interrupts mistakenly enabled>
                                    <interrupt>
                                    end_page_writeback
                                    test_clear_page_writeback
                                    lock_page_memcg
                                    <deadlock>
    unlock_page_memcg

Due to configuration limitations this deadlock is not currently possible
because we don't mix cgroup writeback (a cgroupv2 feature) and
memory.move_charge_at_immigrate (a cgroupv1 feature).

If the kernel is hacked to always claim inode switching and memcg
moving_account, then this script triggers a lockup in less than a minute:

  cd /mnt/cgroup/memory
  mkdir a b
  echo 1 > a/memory.move_charge_at_immigrate
  echo 1 > b/memory.move_charge_at_immigrate
  (
    echo $BASHPID > a/cgroup.procs
    while true; do
      dd if=/dev/zero of=/mnt/big bs=1M count=256
    done
  ) &
  while true; do
    sync
  done &
  sleep 1h &
  SLEEP=$!
  while true; do
    echo $SLEEP > a/cgroup.procs
    echo $SLEEP > b/cgroup.procs
  done

The deadlock does not seem possible with current configurations, so it's
debatable whether there's any reason to modify the kernel.  I suggest we
should, to prevent future surprises.  And Wang Long said "this deadlock
occurs three times in our environment", so there's more reason to apply
this, even to stable.
Stable 4.4 has minor conflicts applying this patch.  For a clean 4.4 patch
see "[PATCH for-4.4] writeback: safer lock nesting"
https://lkml.org/lkml/2018/4/11/146

[gthelen@google.com: v4]
  Link: http://lkml.kernel.org/r/20180411084653.254724-1-gthelen@google.com
[akpm@linux-foundation.org: comment tweaks, struct initialization simplification]
Change-Id: Ibb773e8045852978f6207074491d262f1b3fb613
Link: http://lkml.kernel.org/r/20180410005908.167976-1-gthelen@google.com
Fixes: 682aa8e1 ("writeback: implement unlocked_inode_to_wb transaction and use it for stat updates")
Signed-off-by: Greg Thelen <gthelen@google.com>
Reported-by: Wang Long <wanglong19@meituan.com>
Acked-by: Wang Long <wanglong19@meituan.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: <stable@vger.kernel.org>	[v4.2+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[natechancellor: Adjust context due to lack of b93b0163]
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 71f24a91
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
          */
         if (inode && inode_to_wb_is_valid(inode)) {
                 struct bdi_writeback *wb;
-                bool locked, congested;
+                struct wb_lock_cookie lock_cookie = {};
+                bool congested;
 
-                wb = unlocked_inode_to_wb_begin(inode, &locked);
+                wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
                 congested = wb_congested(wb, cong_bits);
-                unlocked_inode_to_wb_end(inode, locked);
+                unlocked_inode_to_wb_end(inode, &lock_cookie);
                 return congested;
         }
@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
         set_wb_congested(bdi->wb.congested, sync);
 }
 
+struct wb_lock_cookie {
+        bool locked;
+        unsigned long flags;
+};
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 /**
  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
  * @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
  *
  * The caller wants to access the wb associated with @inode but isn't
  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
  * association doesn't change until the transaction is finished with
  * unlocked_inode_to_wb_end().
  *
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction.  IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction.  IRQs may or may not be disabled on
+ * return.
  */
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
         rcu_read_lock();
@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
          * Paired with store_release in inode_switch_wb_work_fn() and
          * ensures that we see the new wb if we see cleared I_WB_SWITCH.
          */
-        *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
-        if (unlikely(*lockedp))
-                spin_lock_irq(&inode->i_mapping->tree_lock);
+        cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+        if (unlikely(cookie->locked))
+                spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
 
         /*
          * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -402,12 +402,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 /**
  * unlocked_inode_to_wb_end - end inode wb access transaction
  * @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
  */
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+                                            struct wb_lock_cookie *cookie)
 {
-        if (unlikely(locked))
-                spin_unlock_irq(&inode->i_mapping->tree_lock);
+        if (unlikely(cookie->locked))
+                spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
 
         rcu_read_unlock();
 }
@@ -454,12 +455,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 }
 
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
         return inode_to_wb(inode);
 }
 
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+                                            struct wb_lock_cookie *cookie)
 {
 }
@@ -2506,13 +2506,13 @@ void account_page_redirty(struct page *page)
         if (mapping && mapping_cap_account_dirty(mapping)) {
                 struct inode *inode = mapping->host;
                 struct bdi_writeback *wb;
-                bool locked;
+                struct wb_lock_cookie cookie = {};
 
-                wb = unlocked_inode_to_wb_begin(inode, &locked);
+                wb = unlocked_inode_to_wb_begin(inode, &cookie);
                 current->nr_dirtied--;
                 dec_node_page_state(page, NR_DIRTIED);
                 dec_wb_stat(wb, WB_DIRTIED);
-                unlocked_inode_to_wb_end(inode, locked);
+                unlocked_inode_to_wb_end(inode, &cookie);
         }
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2618,15 +2618,15 @@ void cancel_dirty_page(struct page *page)
         if (mapping_cap_account_dirty(mapping)) {
                 struct inode *inode = mapping->host;
                 struct bdi_writeback *wb;
-                bool locked;
+                struct wb_lock_cookie cookie = {};
 
                 lock_page_memcg(page);
-                wb = unlocked_inode_to_wb_begin(inode, &locked);
+                wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
                 if (TestClearPageDirty(page))
                         account_page_cleaned(page, mapping, wb);
 
-                unlocked_inode_to_wb_end(inode, locked);
+                unlocked_inode_to_wb_end(inode, &cookie);
                 unlock_page_memcg(page);
         } else {
                 ClearPageDirty(page);
@@ -2658,7 +2658,7 @@ int clear_page_dirty_for_io(struct page *page)
         if (mapping && mapping_cap_account_dirty(mapping)) {
                 struct inode *inode = mapping->host;
                 struct bdi_writeback *wb;
-                bool locked;
+                struct wb_lock_cookie cookie = {};
 
                 /*
                  * Yes, Virginia, this is indeed insane.
@@ -2695,7 +2695,7 @@ int clear_page_dirty_for_io(struct page *page)
                  * always locked coming in here, so we get the desired
                  * exclusion.
                  */
-                wb = unlocked_inode_to_wb_begin(inode, &locked);
+                wb = unlocked_inode_to_wb_begin(inode, &cookie);
                 if (TestClearPageDirty(page)) {
                         mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                         dec_node_page_state(page, NR_FILE_DIRTY);
@@ -2703,7 +2703,7 @@ int clear_page_dirty_for_io(struct page *page)
                         dec_wb_stat(wb, WB_RECLAIMABLE);
                         ret = 1;
                 }
-                unlocked_inode_to_wb_end(inode, locked);
+                unlocked_inode_to_wb_end(inode, &cookie);
                 return ret;
         }
         return TestClearPageDirty(page);
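
Taken together, the hunks above change the caller pattern to the following
(a sketch assembled from the diff, not a verbatim call site): the IRQ flags
are saved into the cookie by unlocked_inode_to_wb_begin() and restored by
unlocked_inode_to_wb_end(), so interrupts stay disabled across the whole
lock_page_memcg() section:

    struct wb_lock_cookie cookie = {};

    lock_page_memcg(page);                           /* may spin_lock_irqsave() */
    wb = unlocked_inode_to_wb_begin(inode, &cookie); /* irqsave into cookie->flags */
    if (TestClearPageDirty(page))
        account_page_cleaned(page, mapping, wb);
    unlocked_inode_to_wb_end(inode, &cookie);        /* irqrestore from cookie->flags */
    unlock_page_memcg(page);                         /* restores the caller's IRQ state */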