Commit 0b62af28 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

i915: convert shmem_sg_free_table() to use a folio_batch

Remove a few hidden compound_head() calls by converting the returned page
to a folio once and using the folio APIs.  We also only increment the
refcount on the folio once instead of once for each page.  Ideally, we
would have a for_each_sgt_folio macro, but until then this will do.

Link: https://lkml.kernel.org/r/20230621164557.3510324-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent bdadc6d8
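
The "hidden compound_head()" point can be seen in the free path below: the old per-page calls such as set_page_dirty() and mark_page_accessed() each resolve a possibly-tail page back to its head page internally, whereas the patch does that lookup once via page_folio() and then stays on the folio APIs. A minimal before/after sketch of the idea (illustrative only, not lines from the patch; the wrapper functions are made up and kernel context is assumed):

/* Illustrative sketch, not part of the patch. */
static void dirty_and_age_before(struct page *page)
{
        set_page_dirty(page);           /* head-page lookup inside */
        mark_page_accessed(page);       /* head-page lookup again */
}

static void dirty_and_age_after(struct page *page)
{
        struct folio *folio = page_folio(page); /* resolve once */

        folio_mark_dirty(folio);
        folio_mark_accessed(folio);
}

The refcount note in the message follows the same reasoning: the batch releases one reference per folio rather than one per constituent page, which is what the "folio == last" skip in shmem_sg_free_table() below implements.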
@@ -19,13 +19,13 @@
 #include "i915_trace.h"
 
 /*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
+ * Move folios to appropriate lru and release the batch, decrementing the
+ * ref count of those folios.
  */
-static void check_release_pagevec(struct pagevec *pvec)
+static void check_release_folio_batch(struct folio_batch *fbatch)
 {
-        check_move_unevictable_pages(pvec);
-        __pagevec_release(pvec);
+        check_move_unevictable_folios(fbatch);
+        __folio_batch_release(fbatch);
         cond_resched();
 }
@@ -33,24 +33,29 @@ void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
                          bool dirty, bool backup)
 {
         struct sgt_iter sgt_iter;
-        struct pagevec pvec;
+        struct folio_batch fbatch;
+        struct folio *last = NULL;
         struct page *page;
 
         mapping_clear_unevictable(mapping);
 
-        pagevec_init(&pvec);
+        folio_batch_init(&fbatch);
         for_each_sgt_page(page, sgt_iter, st) {
-                if (dirty)
-                        set_page_dirty(page);
+                struct folio *folio = page_folio(page);
 
+                if (folio == last)
+                        continue;
+                last = folio;
+                if (dirty)
+                        folio_mark_dirty(folio);
                 if (backup)
-                        mark_page_accessed(page);
+                        folio_mark_accessed(folio);
 
-                if (!pagevec_add(&pvec, page))
-                        check_release_pagevec(&pvec);
+                if (!folio_batch_add(&fbatch, folio))
+                        check_release_folio_batch(&fbatch);
         }
-        if (pagevec_count(&pvec))
-                check_release_pagevec(&pvec);
+        if (fbatch.nr)
+                check_release_folio_batch(&fbatch);
 
         sg_free_table(st);
 }
@@ -63,8 +68,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
         unsigned int page_count; /* restricted by sg_alloc_table */
         unsigned long i;
         struct scatterlist *sg;
-        struct page *page;
-        unsigned long last_pfn = 0; /* suppress gcc warning */
+        unsigned long next_pfn = 0; /* suppress gcc warning */
         gfp_t noreclaim;
         int ret;
@@ -95,6 +99,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
         sg = st->sgl;
         st->nents = 0;
         for (i = 0; i < page_count; i++) {
+                struct folio *folio;
                 const unsigned int shrink[] = {
                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                         0,
@@ -103,12 +108,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 
                 do {
                         cond_resched();
-                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-                        if (!IS_ERR(page))
+                        folio = shmem_read_folio_gfp(mapping, i, gfp);
+                        if (!IS_ERR(folio))
                                 break;
 
                         if (!*s) {
-                                ret = PTR_ERR(page);
+                                ret = PTR_ERR(folio);
                                 goto err_sg;
                         }
@@ -147,19 +152,21 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 
                 if (!i ||
                     sg->length >= max_segment ||
-                    page_to_pfn(page) != last_pfn + 1) {
+                    folio_pfn(folio) != next_pfn) {
                         if (i)
                                 sg = sg_next(sg);
 
                         st->nents++;
-                        sg_set_page(sg, page, PAGE_SIZE, 0);
+                        sg_set_folio(sg, folio, folio_size(folio), 0);
                 } else {
-                        sg->length += PAGE_SIZE;
+                        /* XXX: could overflow? */
+                        sg->length += folio_size(folio);
                 }
-                last_pfn = page_to_pfn(page);
+                next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+                i += folio_nr_pages(folio) - 1;
 
                 /* Check that the i965g/gm workaround works. */
-                GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
+                GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
         }
         if (sg) /* loop terminated early; short sg table */
                 sg_mark_end(sg);
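
In the allocation path, the coalescing bookkeeping above changes from "the next page must be last_pfn + 1" to "the next folio must start at next_pfn", where next_pfn already accounts for the size of the folio just added, and the loop index is advanced past that folio's tail pages. A worked sketch with made-up numbers (illustrative only, not lines from the patch):

/* Illustrative only: assume a 4-page folio whose first pfn is 0x1000. */
sg_set_folio(sg, folio, folio_size(folio), 0);          /* entry covers 4 pages */
next_pfn = folio_pfn(folio) + folio_nr_pages(folio);    /* 0x1000 + 4 = 0x1004 */
i += folio_nr_pages(folio) - 1;                         /* skip the 3 tail indices */

/*
 * If the folio read back for the next index starts at pfn 0x1004, then
 * folio_pfn(folio) == next_pfn, so it is merged into the same
 * scatterlist entry instead of starting a new one:
 */
sg->length += folio_size(folio);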