// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
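
/*
 * Illustrative walk-through (a sketch, not part of the mapping logic):
 * the count for one slot evolves as
 *
 *   0 --kmap_high()-->  2   (map_new_virtual() sets 1, the caller's
 *                            reference makes it 2)
 *   2 --kmap_high()-->  3   (a second user of the same page)
 *   3 --kunmap_high()-> 2 --kunmap_high()-> 1   (mapped but unused)
 *   1 --flush_all_zero_pkmaps()--> 0            (slot usable again)
 */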
#ifdef CONFIG_HIGHMEM
/*
 * Architecture with aliasing data cache may define the following family of
 * helper functions in its asm/highmem.h to control cache color of virtual
 * addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around the PKMAP region end. When this happens, an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);

		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);
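
/*
 * Illustrative sketch (compiled out, hypothetical helper): highmem code
 * commonly reaches this via the kmap_to_page() wrapper in
 * <linux/highmem.h>; translating a kmap address back must yield the page
 * that was mapped.
 */
#if 0
static void example_kmap_round_trip(struct page *page)
{
	void *vaddr = kmap(page);

	/* A PKMAP address resolves through pkmap_page_table above. */
	WARN_ON(kmap_to_page(vaddr) != page);
	kunmap(page);
}
#endif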

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);
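
/*
 * Illustrative sketch (compiled out, hypothetical helper): kmap_high() is
 * normally reached through the kmap() wrapper in <linux/highmem.h>. It may
 * sleep in map_new_virtual() until a PKMAP slot is freed, so it is only
 * usable from process context.
 */
#if 0
static void example_zero_highmem_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep waiting for a free slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* drops the pkmap_count reference */
}
#endif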

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif
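
/*
 * Illustrative sketch (compiled out, hypothetical helper) of the contract
 * above: on architectures defining ARCH_NEEDS_KMAP_HIGH_GET an existing
 * mapping of a highmem page can be pinned without sleeping, and a non-NULL
 * return must be paired with kunmap_high().
 */
#if 0
static bool example_peek_if_mapped(struct page *page, u32 *val)
{
	void *vaddr = kmap_high_get(page);	/* never sleeps */

	if (!vaddr)
		return false;		/* no current kmap of this page */
	*val = *(u32 *)vaddr;
	kunmap_high(page);		/* matching unpin */
	return true;
}
#endif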

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_atomic(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_atomic(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
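
/*
 * Illustrative sketch (compiled out, hypothetical helper): offsets passed
 * to zero_user_segments() are relative to the start of the (possibly
 * compound) page, so one call can zero both a head and a tail range even
 * when they land in different subpages.
 */
#if 0
static void example_zero_around(struct page *page,
				unsigned int valid_start,
				unsigned int valid_end)
{
	/* Zero [0, valid_start) and [valid_end, page_size(page)). */
	zero_user_segments(page, 0, valid_start,
			   valid_end, page_size(page));
}
#endif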
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is left unused, acting as a guard page.
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
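
/*
 * Illustrative sketch (compiled out, hypothetical helper): callers normally
 * use the kmap_local_page() wrapper. The mappings are CPU-local, nest, and
 * must be released in reverse (LIFO) order.
 */
#if 0
static void example_copy_between_highpages(struct page *dst, struct page *src)
{
	void *vto = kmap_local_page(dst);
	void *vfrom = kmap_local_page(src);

	memcpy(vto, vfrom, PAGE_SIZE);
	kunmap_local(vfrom);	/* LIFO: most recent mapping first */
	kunmap_local(vto);
}
#endif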

void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);

/*
 * Invoked before switch_to(). This is safe even when an interrupt which
 * needs a kmap_local happens during or after clearing the maps, because
 * task::kmap_ctrl.idx is not modified by the unmapping code, so a nested
 * kmap_local will use the next unused index and restore the index on
 * unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
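
/*
 * Illustrative sketch (compiled out, hypothetical helper): for a highmem
 * page, page_address() legitimately returns NULL when no kmap exists, so
 * callers must either tolerate NULL or create a mapping themselves.
 */
#if 0
static bool example_is_directly_addressable(struct page *page)
{
	/* Lowmem pages always have an address; highmem only while kmapped. */
	return page_address(page) != NULL;
}
#endif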

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */