Commit e6002df8 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] tmpfs: scheduling-while-atomic fix

Nick has tracked scheduling-while-atomic errors to shmem's fragile kmap
avoidance: the root error appears to lie deeper, but rework that fragility.
Plus I've been indicted for war crimes at the end of shmem_swp_entry: my
apologia scorned, so now hide the evidence.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 23e6835d
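
For readers not steeped in the highmem code, a minimal sketch of the hazard being fixed may help; it is illustrative only and not code from this patch, and the helper name is invented. kmap_atomic() raises the preempt count unconditionally (the old comment in the diff below mentions its inc_preempt_count()), kunmap_atomic() lowers it again, so the two must be paired on every path; otherwise the count drifts until some later call that can sleep reports scheduling while atomic.

#include <linux/highmem.h>	/* kmap_atomic, kunmap_atomic */
#include <linux/mm.h>		/* PageHighMem */

/* Illustrative only: the shape of an unbalanced map/unmap pair. */
static void unbalanced_sketch(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER1);	/* preempt count goes up */

	/* ... read or write swap entries through addr ... */

	if (PageHighMem(page))
		kunmap_atomic(addr, KM_USER1);		/* balanced only for highmem pages */

	/*
	 * For a lowmem page the preempt count is still raised here, so
	 * the next operation that may sleep warns: scheduling while atomic.
	 */
}
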
@@ -104,22 +104,24 @@ static inline void shmem_dir_unmap(struct page **dir)
 }
 
 static swp_entry_t *shmem_swp_map(struct page *page)
+{
+        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
+}
+
+static inline void shmem_swp_balance_unmap(void)
 {
         /*
-         * We have to avoid the unconditional inc_preempt_count()
-         * in kmap_atomic(), since shmem_swp_unmap() will also be
-         * applied to the low memory addresses within i_direct[].
-         * PageHighMem and high_memory tests are good for all arches
-         * and configs: highmem_start_page and FIXADDR_START are not.
+         * When passing a pointer to an i_direct entry, to code which
+         * also handles indirect entries and so will shmem_swp_unmap,
+         * we must arrange for the preempt count to remain in balance.
+         * What kmap_atomic of a lowmem page does depends on config
+         * and architecture, so pretend to kmap_atomic some lowmem page.
          */
-        return PageHighMem(page)?
-                (swp_entry_t *)kmap_atomic(page, KM_USER1):
-                (swp_entry_t *)page_address(page);
+        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
 }
 
 static inline void shmem_swp_unmap(swp_entry_t *entry)
 {
-        if (entry >= (swp_entry_t *)high_memory)
-                kunmap_atomic(entry, KM_USER1);
+        kunmap_atomic(entry, KM_USER1);
 }
 
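Taking the rewritten helpers above together, the invariant they establish can be sketched as follows. The lookup helper below is hypothetical (in the patch the same pattern appears inside shmem_swp_entry() and shmem_unuse_inode()): every pointer later handed to shmem_swp_unmap() has been charged exactly one kmap_atomic, either a real one for an entry in a possibly-highmem indirect page, or the dummy kmap_atomic(ZERO_PAGE(0)) performed by shmem_swp_balance_unmap() for a lowmem i_direct[] entry.

/* Hypothetical helper, showing only the preempt-count accounting. */
static swp_entry_t *lookup_sketch(struct shmem_inode_info *info,
				  struct page *subdir,
				  unsigned long index, unsigned long offset)
{
	if (index < SHMEM_NR_DIRECT) {
		/* i_direct[] is lowmem and never really mapped: fake the map */
		shmem_swp_balance_unmap();
		return info->i_direct + index;
	}
	/* an indirect page may be highmem: take a real atomic kmap */
	return shmem_swp_map(subdir) + offset;
}

Either way the caller finishes with a single shmem_swp_unmap(), whose kunmap_atomic() brings the preempt count back to where it started.
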
@@ -263,8 +265,10 @@ static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long
         struct page **dir;
         struct page *subdir;
 
-        if (index < SHMEM_NR_DIRECT)
+        if (index < SHMEM_NR_DIRECT) {
+                shmem_swp_balance_unmap();
                 return info->i_direct+index;
+        }
         if (!info->i_indirect) {
                 if (page) {
                         info->i_indirect = *page;
@@ -306,17 +310,7 @@ static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long
                 *page = NULL;
         }
         shmem_dir_unmap(dir);
-
-        /*
-         * With apologies... caller shmem_swp_alloc passes non-NULL
-         * page (though perhaps NULL *page); and now we know that this
-         * indirect page has been allocated, we can shortcut the final
-         * kmap if we know it contains no swap entries, as is commonly
-         * the case: return pointer to a 0 which doesn't need kmapping.
-         */
-        return (page && !subdir->nr_swapped)?
-                (swp_entry_t *)&subdir->nr_swapped:
-                shmem_swp_map(subdir) + offset;
+        return shmem_swp_map(subdir) + offset;
 }
 
 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
@@ -650,8 +644,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
         if (size > SHMEM_NR_DIRECT)
                 size = SHMEM_NR_DIRECT;
         offset = shmem_find_swp(entry, ptr, ptr+size);
-        if (offset >= 0)
+        if (offset >= 0) {
+                shmem_swp_balance_unmap();
                 goto found;
+        }
         if (!info->i_indirect)
                 goto lost2;
         /* we might be racing with shmem_truncate */
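
Seen from the caller's side (a hypothetical caller, not part of this patch, and ignoring details real callers handle), the pattern is symmetric: look the entry up, use it, and unmap exactly once, whether the pointer came from i_direct[] or from a mapped indirect page.

/* Hypothetical caller sketch; real callers run under info->lock. */
static void store_swap_sketch(struct shmem_inode_info *info,
			      unsigned long index, swp_entry_t swap)
{
	swp_entry_t *ptr = shmem_swp_entry(info, index, NULL);

	if (ptr) {			/* NULL if the indirect page is not allocated */
		*ptr = swap;		/* valid for direct and indirect entries alike */
		shmem_swp_unmap(ptr);	/* the single kunmap_atomic keeps the count balanced */
	}
}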