Commit 62072526 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2022-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Two hotfixes, both cc:stable"

* tag 'mm-hotfixes-stable-2022-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/hmm: fault non-owner device private entries
  page_alloc: fix invalid watermark check on a negative value
parents 8a91f86f 8a295dbb
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -212,14 +212,6 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline bool hmm_is_device_private_entry(struct hmm_range *range,
-		swp_entry_t entry)
-{
-	return is_device_private_entry(entry) &&
-		pfn_swap_entry_to_page(entry)->pgmap->owner ==
-		range->dev_private_owner;
-}
-
 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
 						 pte_t pte)
 {
@@ -252,10 +244,12 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		swp_entry_t entry = pte_to_swp_entry(pte);
 
 		/*
-		 * Never fault in device private pages, but just report
-		 * the PFN even if not present.
+		 * Don't fault in device private pages owned by the caller,
+		 * just report the PFN.
 		 */
-		if (hmm_is_device_private_entry(range, entry)) {
+		if (is_device_private_entry(entry) &&
+		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
+		    range->dev_private_owner) {
 			cpu_flags = HMM_PFN_VALID;
 			if (is_writable_device_private_entry(entry))
 				cpu_flags |= HMM_PFN_WRITE;
@@ -273,6 +267,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		if (!non_swap_entry(entry))
 			goto fault;
 
+		if (is_device_private_entry(entry))
+			goto fault;
+
 		if (is_device_exclusive_entry(entry))
 			goto fault;
 
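For context, the effect of these hunks is that hmm_range_fault() now faults in (migrates back to system memory) device private entries owned by a different device, instead of returning an error for them; entries owned by the caller are still reported without faulting. A minimal, hypothetical caller-side sketch follows; my_drv_populate_range and its parameters are invented for illustration and only show where the dev_private_owner cookie compared against pgmap->owner comes from.

/* Hypothetical driver helper, illustration only: the owner cookie set here
 * is what hmm_vma_handle_pte() compares against pgmap->owner above. */
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

static int my_drv_populate_range(struct mmu_interval_notifier *notifier,
				 unsigned long start, unsigned long end,
				 unsigned long *pfns, void *owner)
{
	struct hmm_range range = {
		.notifier = notifier,
		.notifier_seq = mmu_interval_read_begin(notifier),
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
		.dev_private_owner = owner,
	};

	/* Must be called under mmap_read_lock(); callers retry on -EBUSY. */
	return hmm_range_fault(&range);
}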
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3968,11 +3968,15 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 	 * need to be calculated.
 	 */
 	if (!order) {
-		long fast_free;
+		long usable_free;
+		long reserved;
 
-		fast_free = free_pages;
-		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
-		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
+		usable_free = free_pages;
+		reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
+
+		/* reserved may over estimate high-atomic reserves. */
+		usable_free -= min(usable_free, reserved);
+		if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
 			return true;
 	}
 
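The bug this hunk fixes: the unusable-free estimate (which includes high-atomic reserves) can exceed free_pages, so the old fast_free went negative; because mark is an unsigned long, the comparison promoted the negative value to a huge unsigned number and the order-0 fast check wrongly passed. Clamping with min() keeps usable_free at zero or above. A small userspace sketch of that signed/unsigned pitfall, with invented numbers and the lowmem_reserve term dropped for brevity, is:

/* Userspace sketch, not kernel code: shows how a negative signed value
 * slips past a comparison against an unsigned watermark. */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	long free_pages = 100;      /* few free pages in the zone              */
	long reserved = 400;        /* over-estimated high-atomic reserve      */
	unsigned long mark = 50;    /* watermark is unsigned, as in the kernel */

	long fast_free = free_pages - reserved;                     /* -300 */
	long usable_free = free_pages - min(free_pages, reserved);  /*    0 */

	/* -300 converts to a huge unsigned value: prints 1, check wrongly passes */
	printf("old check passes: %d\n", fast_free > mark);
	/* 0 > 50 is false: prints 0, the watermark check correctly fails */
	printf("new check passes: %d\n", usable_free > mark);
	return 0;
}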