Commit f4a8871f authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-05-18-15-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Eight hotfixes. Four are cc:stable, the other four are for post-6.4
  issues, or aren't considered suitable for backporting"

* tag 'mm-hotfixes-stable-2023-05-18-15-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: Cleanup Arm Display IP maintainers
  MAINTAINERS: repair pattern in DIALOG SEMICONDUCTOR DRIVERS
  nilfs2: fix use-after-free bug of nilfs_root in nilfs_evict_inode()
  mm: fix zswap writeback race condition
  mm: kfence: fix false positives on big endian
  zsmalloc: move LRU update from zs_map_object() to zs_malloc()
  mm: shrinkers: fix race condition on debugfs cleanup
  maple_tree: make maple state reusable after mas_empty_area()
parents 2d1bcbc6 c7394fa9
@@ -1677,10 +1677,7 @@ F: drivers/power/reset/arm-versatile-reboot.c
F: drivers/soc/versatile/
ARM KOMEDA DRM-KMS DRIVER
M: James (Qian) Wang <james.qian.wang@arm.com>
M: Liviu Dudau <liviu.dudau@arm.com>
M: Mihail Atanassov <mihail.atanassov@arm.com>
L: Mali DP Maintainers <malidp@foss.arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/arm,komeda.yaml
@@ -1701,8 +1698,6 @@ F: include/uapi/drm/panfrost_drm.h
ARM MALI-DP DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
M: Brian Starkey <brian.starkey@arm.com>
L: Mali DP Maintainers <malidp@foss.arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/arm,malidp.yaml
@@ -6012,7 +6007,7 @@ W: http://www.dialog-semiconductor.com/products
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/input/dlg,da72??.txt
F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/mfd/da90*.yaml
F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
F: Documentation/devicetree/bindings/regulator/slg51000.txt
......
@@ -917,6 +917,7 @@ void nilfs_evict_inode(struct inode *inode)
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
struct the_nilfs *nilfs;
int ret;
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -929,6 +930,23 @@ void nilfs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
nilfs = sb->s_fs_info;
if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
/*
* If this inode is about to be disposed after the file system
* has been degraded to read-only due to file system corruption
* or after the writer has been detached, do not make any
* changes that cause writes, just clear it.
* Do this check after read-locking ns_segctor_sem by
* nilfs_transaction_begin() in order to avoid a race with
* the writer detach operation.
*/
clear_inode(inode);
nilfs_clear_inode(inode);
nilfs_transaction_abort(sb);
return;
}
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
......
@@ -107,7 +107,10 @@ extern void synchronize_shrinkers(void);
#ifdef CONFIG_SHRINKER_DEBUG
extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id);
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
const char *fmt, ...);
#else /* CONFIG_SHRINKER_DEBUG */
@@ -115,10 +118,16 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id)
{
*debugfs_id = -1;
return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id)
{
}
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
......
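The header change replaces the single-step shrinker_debugfs_remove(shrinker) with a two-step teardown, so the debugfs dentry and its id can be freed only after concurrent users are done with the shrinker. A minimal sketch of the intended calling sequence, condensed from the unregister_shrinker() hunk further down (locking simplified; names as in that hunk):

/* Sketch only; see the mm/vmscan.c hunk below for the real caller. */
struct dentry *debugfs_entry;
int debugfs_id;

/* 1) Under shrinker_mutex: unhook the debugfs entry and save its id. */
debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
mutex_unlock(&shrinker_mutex);

/* 2) Wait for in-flight shrinker walkers to finish. */
synchronize_srcu(&shrinker_srcu);

/* 3) Only now delete the dentry and release the id. */
shrinker_debugfs_remove(debugfs_entry, debugfs_id);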
@@ -5317,15 +5317,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
mt = mte_node_type(mas->node);
pivots = ma_pivots(mas_mn(mas), mt);
if (offset)
mas->min = pivots[offset - 1] + 1;
if (offset < mt_pivots[mt])
mas->max = pivots[offset];
if (mas->index < mas->min)
mas->index = mas->min;
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
return 0;
}
......
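Before this change, mas_empty_area() wrote the new lower bound into mas->min and an upper bound into mas->max, clobbering the maple state so it could not be reused for a follow-up search; the fix computes the bound into a local variable via mas_safe_min() and leaves the state untouched. For reference, mas_safe_min() behaves roughly as below (paraphrased from lib/maple_tree.c; an approximation, not the exact source):

/* Lower bound of the slot at @offset, without modifying the maple state. */
static inline unsigned long mas_safe_min(struct ma_state *mas,
					 unsigned long *pivots,
					 unsigned char offset)
{
	if (offset)
		return pivots[offset - 1] + 1;	/* previous pivot + 1 */

	return mas->min;			/* first slot: node minimum */
}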
@@ -29,7 +29,7 @@
* canary of every 8 bytes is the same. 64-bit memory can be filled and checked
* at a time instead of byte by byte to improve performance.
*/
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64
......
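KFENCE fills and checks canary bytes eight at a time as a u64, comparing against the constant above, so the constant encodes an assumption about how the per-byte patterns map into a word. A small stand-alone illustration (userspace C, assuming the per-byte canary is 0xaa ^ (addr & 0x7) as in the surrounding kfence code):

/* Demonstration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t canary[8];
	uint64_t word;
	int i;

	/* What the byte-wise fill stores into an 8-byte-aligned canary area. */
	for (i = 0; i < 8; i++)
		canary[i] = 0xaa ^ i;

	/* What the 64-bit fast path reads back in one access. */
	memcpy(&word, canary, sizeof(word));

	/*
	 * Little endian prints adacafaea9a8abaa, i.e.
	 * 0xaaaaaaaaaaaaaaaa ^ 0x0706050403020100, so the old constant only
	 * matched there.  On big endian the bytes land in the opposite
	 * order, so the constant has to be byte-swapped first -- which
	 * le64_to_cpu() does on big-endian kernels and leaves as-is on
	 * little-endian ones.
	 */
	printf("%016llx\n", (unsigned long long)word);
	return 0;
}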
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
}
EXPORT_SYMBOL(shrinker_debugfs_rename);
struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id)
{
struct dentry *entry = shrinker->debugfs_entry;
@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
kfree_const(shrinker->name);
shrinker->name = NULL;
if (entry) {
ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
shrinker->debugfs_entry = NULL;
}
*debugfs_id = entry ? shrinker->debugfs_id : -1;
shrinker->debugfs_entry = NULL;
return entry;
}
void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
{
debugfs_remove_recursive(debugfs_entry);
ida_free(&shrinker_debugfs_ida, debugfs_id);
}
static int __init shrinker_debugfs_init(void)
{
struct shrinker *shrinker;
......
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
void unregister_shrinker(struct shrinker *shrinker)
{
struct dentry *debugfs_entry;
int debugfs_id;
if (!(shrinker->flags & SHRINKER_REGISTERED))
return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
shrinker->flags &= ~SHRINKER_REGISTERED;
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
unregister_memcg_shrinker(shrinker);
debugfs_entry = shrinker_debugfs_remove(shrinker);
debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
mutex_unlock(&shrinker_mutex);
atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
debugfs_remove_recursive(debugfs_entry);
shrinker_debugfs_remove(debugfs_entry, debugfs_id);
kfree(shrinker->nr_deferred);
shrinker->nr_deferred = NULL;
......
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
obj_to_location(obj, &page, &obj_idx);
zspage = get_zspage(page);
#ifdef CONFIG_ZPOOL
/*
* Move the zspage to front of pool's LRU.
*
* Note that this is swap-specific, so by definition there are no ongoing
* accesses to the memory while the page is swapped out that would make
* it "hot". A new entry is hot, then ages to the tail until it gets either
* written back or swaps back in.
*
* Furthermore, map is also called during writeback. We must not put an
* isolated page on the LRU mid-reclaim.
*
* As a result, only update the LRU when the page is mapped for write
* when it's first instantiated.
*
* This is a deviation from the other backends, which perform this update
* in the allocation function (zbud_alloc, z3fold_alloc).
*/
if (mm == ZS_MM_WO) {
if (!list_empty(&zspage->lru))
list_del(&zspage->lru);
list_add(&zspage->lru, &pool->lru);
}
#endif
/*
* migration cannot move any zpages in this zspage. Here, pool->lock
* is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, ZS_OBJS_INUSE, 1);
spin_unlock(&pool->lock);
return handle;
goto out;
}
spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
out:
#ifdef CONFIG_ZPOOL
/* Add/move zspage to beginning of LRU */
if (!list_empty(&zspage->lru))
list_del(&zspage->lru);
list_add(&zspage->lru, &pool->lru);
#endif
spin_unlock(&pool->lock);
return handle;
......
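The comment removed above notes that the other zpool backends perform this LRU update in their allocation functions; with this change zsmalloc follows the same convention, updating the LRU in zs_malloc() while pool->lock is still held. A toy sketch of that convention (hypothetical backend and names; only the kernel list/lock pattern is the point):

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_pool {
	spinlock_t lock;
	struct list_head lru;	/* most recently allocated entries at the head */
};

struct toy_obj {
	struct list_head lru;	/* INIT_LIST_HEAD() when the object is created */
};

static void toy_alloc_lru_update(struct toy_pool *pool, struct toy_obj *obj)
{
	spin_lock(&pool->lock);
	if (!list_empty(&obj->lru))	/* re-used object: drop its old position */
		list_del(&obj->lru);
	list_add(&obj->lru, &pool->lru);	/* newest goes to the front */
	spin_unlock(&pool->lock);
}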
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
goto fail;
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
/*
* Having a local reference to the zswap entry doesn't exclude
* swapping from invalidating and recycling the swap slot. Once
* the swapcache is secured against concurrent swapping to and
* from the slot, recheck that the entry is still current before
* writing.
*/
spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
spin_unlock(&tree->lock);
delete_from_swap_cache(page_folio(page));
ret = -ENOMEM;
goto fail;
}
spin_unlock(&tree->lock);
/* decompress */
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
dlen = PAGE_SIZE;
......