Commit 6660800f, authored Jan 12, 2016 by Russell King

    Merge branch 'devel-stable' into for-linus

Parents: 598bcc6e 06312f44

Showing 29 changed files with 986 additions and 368 deletions (+986 -368)
arch/arm/Kconfig                              +20   -0
arch/arm/boot/compressed/Makefile              +3   -1
arch/arm/boot/compressed/efi-header.S        +130   -0
arch/arm/boot/compressed/head.S               +52   -2
arch/arm/boot/compressed/vmlinux.lds.S         +7   -0
arch/arm/include/asm/Kbuild                    +1   -0
arch/arm/include/asm/efi.h                    +83   -0
arch/arm/include/asm/fixmap.h                 +28   -1
arch/arm/include/asm/mach/map.h                +2   -0
arch/arm/include/asm/mmu_context.h             +1   -1
arch/arm/kernel/Makefile                       +1   -0
arch/arm/kernel/efi.c                         +38   -0
arch/arm/kernel/setup.c                        +8   -2
arch/arm/mm/init.c                             +4   -1
arch/arm/mm/ioremap.c                          +9   -0
arch/arm/mm/mmu.c                             +87  -41
arch/arm64/include/asm/efi.h                   +9   -0
arch/arm64/kernel/efi.c                       +17 -317
arch/arm64/mm/init.c                           +1   -1
arch/arm64/mm/mmu.c                            +2   -0
drivers/firmware/efi/Makefile                  +4   -0
drivers/firmware/efi/arm-init.c              +209   -0
drivers/firmware/efi/arm-runtime.c           +135   -0
drivers/firmware/efi/efi.c                     +2   -0
drivers/firmware/efi/libstub/Makefile          +9   -0
drivers/firmware/efi/libstub/arm-stub.c        +3   -1
drivers/firmware/efi/libstub/arm32-stub.c     +85   -0
include/linux/memblock.h                       +8   -0
mm/memblock.c                                 +28   -0
arch/arm/Kconfig
@@ -20,6 +20,7 @@ config ARM
 	select GENERIC_ALLOCATOR
 	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
@@ -2060,6 +2061,25 @@ config AUTO_ZRELADDR
 	  0xf8000000. This assumes the zImage being placed in the first 128MB
 	  from start of memory.
 
+config EFI_STUB
+	bool
+
+config EFI
+	bool "UEFI runtime support"
+	depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
+	select UCS2_STRING
+	select EFI_PARAMS_FROM_FDT
+	select EFI_STUB
+	select EFI_ARMSTUB
+	select EFI_RUNTIME_WRAPPERS
+	---help---
+	  This option provides support for runtime services provided
+	  by UEFI firmware (such as non-volatile variables, realtime
+	  clock, and platform reset). A UEFI stub is also provided to
+	  allow the kernel to be booted as an EFI application. This
+	  is only useful for kernels that may run on systems that have
+	  UEFI firmware.
+
 endmenu
 
 menu "CPU Power Management"
arch/arm/boot/compressed/Makefile
@@ -167,9 +167,11 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
 	false; \
 	fi
 
+efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
+
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
 		$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
-		$(bswapsdi2) FORCE
+		$(bswapsdi2) $(efi-obj-y) FORCE
 	@$(check_for_multiple_zreladdr)
 	$(call if_changed,ld)
 	@$(check_for_bad_syms)
arch/arm/boot/compressed/efi-header.S  (new file, 0 → 100644)

/*
 * Copyright (C) 2013-2015 Linaro Ltd
 * Authors: Roy Franz <roy.franz@linaro.org>
 *          Ard Biesheuvel <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

		.macro	__nop
#ifdef CONFIG_EFI_STUB
		@ This is almost but not quite a NOP, since it does clobber the
		@ condition flags. But it is the best we can do for EFI, since
		@ PE/COFF expects the magic string "MZ" at offset 0, while the
		@ ARM/Linux boot protocol expects an executable instruction
		@ there.
		.inst	'M' | ('Z' << 8) | (0x1310 << 16)	@ tstne r0, #0x4d000
#else
		mov	r0, r0
#endif
		.endm

		.macro	__EFI_HEADER
#ifdef CONFIG_EFI_STUB
		b	__efi_start

		.set	start_offset, __efi_start - start
		.org	start + 0x3c
		@
		@ The PE header can be anywhere in the file, but for
		@ simplicity we keep it together with the MSDOS header
		@ The offset to the PE/COFF header needs to be at offset
		@ 0x3C in the MSDOS header.
		@ The only 2 fields of the MSDOS header that are used are this
		@ PE/COFF offset, and the "MZ" bytes at offset 0x0.
		@
		.long	pe_header - start	@ Offset to the PE header.

pe_header:
		.ascii	"PE\0\0"

coff_header:
		.short	0x01c2			@ ARM or Thumb
		.short	2			@ nr_sections
		.long	0 			@ TimeDateStamp
		.long	0			@ PointerToSymbolTable
		.long	1			@ NumberOfSymbols
		.short	section_table - optional_header
						@ SizeOfOptionalHeader
		.short	0x306			@ Characteristics.
						@ IMAGE_FILE_32BIT_MACHINE |
						@ IMAGE_FILE_DEBUG_STRIPPED |
						@ IMAGE_FILE_EXECUTABLE_IMAGE |
						@ IMAGE_FILE_LINE_NUMS_STRIPPED

optional_header:
		.short	0x10b			@ PE32 format
		.byte	0x02			@ MajorLinkerVersion
		.byte	0x14			@ MinorLinkerVersion
		.long	_end - __efi_start	@ SizeOfCode
		.long	0			@ SizeOfInitializedData
		.long	0			@ SizeOfUninitializedData
		.long	efi_stub_entry - start	@ AddressOfEntryPoint
		.long	start_offset		@ BaseOfCode
		.long	0			@ data

extra_header_fields:
		.long	0			@ ImageBase
		.long	0x200			@ SectionAlignment
		.long	0x200			@ FileAlignment
		.short	0			@ MajorOperatingSystemVersion
		.short	0			@ MinorOperatingSystemVersion
		.short	0			@ MajorImageVersion
		.short	0			@ MinorImageVersion
		.short	0			@ MajorSubsystemVersion
		.short	0			@ MinorSubsystemVersion
		.long	0			@ Win32VersionValue

		.long	_end - start		@ SizeOfImage
		.long	start_offset		@ SizeOfHeaders
		.long	0			@ CheckSum
		.short	0xa			@ Subsystem (EFI application)
		.short	0			@ DllCharacteristics
		.long	0			@ SizeOfStackReserve
		.long	0			@ SizeOfStackCommit
		.long	0			@ SizeOfHeapReserve
		.long	0			@ SizeOfHeapCommit
		.long	0			@ LoaderFlags
		.long	0x6			@ NumberOfRvaAndSizes

		.quad	0			@ ExportTable
		.quad	0			@ ImportTable
		.quad	0			@ ResourceTable
		.quad	0			@ ExceptionTable
		.quad	0			@ CertificationTable
		.quad	0			@ BaseRelocationTable

section_table:
		@
		@ The EFI application loader requires a relocation section
		@ because EFI applications must be relocatable. This is a
		@ dummy section as far as we are concerned.
		@
		.ascii	".reloc\0\0"
		.long	0			@ VirtualSize
		.long	0			@ VirtualAddress
		.long	0			@ SizeOfRawData
		.long	0			@ PointerToRawData
		.long	0			@ PointerToRelocations
		.long	0			@ PointerToLineNumbers
		.short	0			@ NumberOfRelocations
		.short	0			@ NumberOfLineNumbers
		.long	0x42100040		@ Characteristics

		.ascii	".text\0\0\0"
		.long	_end - __efi_start	@ VirtualSize
		.long	__efi_start		@ VirtualAddress
		.long	_edata - __efi_start	@ SizeOfRawData
		.long	__efi_start		@ PointerToRawData
		.long	0			@ PointerToRelocations
		.long	0			@ PointerToLineNumbers
		.short	0			@ NumberOfRelocations
		.short	0			@ NumberOfLineNumbers
		.long	0xe0500020		@ Characteristics

		.align	9
__efi_start:
#endif
		.endm
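
A quick illustration (not part of the commit): the word that __nop emits is chosen so that, stored little-endian, its first two bytes are the PE/COFF "MZ" signature while the whole word still decodes as a harmless conditional ARM instruction (tstne r0, #0x4d000, as the comment in efi-header.S says). This small stand-alone C program, written by the editor only to show the byte layout, prints both views.

/* Illustration only: byte layout of the __nop word from efi-header.S. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t insn = 'M' | ('Z' << 8) | (0x1310 << 16);	/* 0x13105a4d */
	uint8_t bytes[4];
	int i;

	/* store little-endian, as the CPU and the PE/COFF loader see it */
	for (i = 0; i < 4; i++)
		bytes[i] = (insn >> (8 * i)) & 0xff;

	printf("first two bytes: %c%c (0x%02x 0x%02x)\n",
	       bytes[0], bytes[1], bytes[0], bytes[1]);
	printf("instruction word: 0x%08x\n", insn);
	return 0;
}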
arch/arm/boot/compressed/head.S
@@ -12,6 +12,8 @@
 #include <asm/assembler.h>
 #include <asm/v7m.h>
 
+#include "efi-header.S"
+
 AR_CLASS(	.arch	armv7-a	)
 M_CLASS(	.arch	armv7-m	)
@@ -126,7 +128,7 @@
 start:
 		.type	start,#function
 		.rept	7
-		mov	r0, r0
+		__nop
 		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
@@ -139,7 +141,8 @@ start:
 		.word	0x04030201	@ endianness flag
 
  THUMB(		.thumb			)
-1:
+1:
+		__EFI_HEADER
  ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
  AR_CLASS(	mrs	r9, cpsr	)
 #ifdef CONFIG_ARM_VIRT_EXT
@@ -1353,6 +1356,53 @@ __enter_kernel:
 
 reloc_code_end:
 
+#ifdef CONFIG_EFI_STUB
+		.align	2
+_start:		.long	start - .
+
+ENTRY(efi_stub_entry)
+		@ allocate space on stack for passing current zImage address
+		@ and for the EFI stub to return of new entry point of
+		@ zImage, as EFI stub may copy the kernel. Pointer address
+		@ is passed in r2. r0 and r1 are passed through from the
+		@ EFI firmware to efi_entry
+		adr	ip, _start
+		ldr	r3, [ip]
+		add	r3, r3, ip
+		stmfd	sp!, {r3, lr}
+		mov	r2, sp			@ pass zImage address in r2
+		bl	efi_entry
+
+		@ Check for error return from EFI stub. r0 has FDT address
+		@ or error code.
+		cmn	r0, #1
+		beq	efi_load_fail
+
+		@ Preserve return value of efi_entry() in r4
+		mov	r4, r0
+		bl	cache_clean_flush
+		bl	cache_off
+
+		@ Set parameters for booting zImage according to boot protocol
+		@ put FDT address in r2, it was returned by efi_entry()
+		@ r1 is the machine type, and r0 needs to be 0
+		mov	r0, #0
+		mov	r1, #0xFFFFFFFF
+		mov	r2, r4
+
+		@ Branch to (possibly) relocated zImage that is in [sp]
+		ldr	lr, [sp]
+		ldr	ip, =start_offset
+		add	lr, lr, ip
+		mov	pc, lr			@ no mode switch
+
+efi_load_fail:
+		@ Return EFI_LOAD_ERROR to EFI firmware on error.
+		ldr	r0, =0x80000001
+		ldmfd	sp!, {ip, pc}
+ENDPROC(efi_stub_entry)
+#endif
+
 		.align
 		.section ".stack", "aw", %nobits
 .L_user_stack:	.space	4096
arch/arm/boot/compressed/vmlinux.lds.S
@@ -48,6 +48,13 @@ SECTIONS
 		*(.rodata)
 		*(.rodata.*)
 	}
+	.data : {
+		/*
+		 * The EFI stub always executes from RAM, and runs strictly before the
+		 * decompressor, so we can make an exception for its r/w data, and keep it
+		 */
+		*(.data.efistub)
+	}
 	.piggydata : {
 		*(.piggydata)
 	}
arch/arm/include/asm/Kbuild
@@ -3,6 +3,7 @@
 generic-y += bitsperlong.h
 generic-y += cputime.h
 generic-y += current.h
+generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
arch/arm/include/asm/efi.h  (new file, 0 → 100644)

/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_EFI_H
#define __ASM_ARM_EFI_H

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#ifdef CONFIG_EFI
void efi_init(void);

int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);

#define efi_call_virt(f, ...)						\
({									\
	efi_##f##_t *__f;						\
	efi_status_t __s;						\
									\
	efi_virtmap_load();						\
	__f = efi.systab->runtime->f;					\
	__s = __f(__VA_ARGS__);						\
	efi_virtmap_unload();						\
	__s;								\
})

#define __efi_call_virt(f, ...)						\
({									\
	efi_##f##_t *__f;						\
									\
	efi_virtmap_load();						\
	__f = efi.systab->runtime->f;					\
	__f(__VA_ARGS__);						\
	efi_virtmap_unload();						\
})

static inline void efi_set_pgd(struct mm_struct *mm)
{
	check_and_switch_context(mm, NULL);
}

void efi_virtmap_load(void);
void efi_virtmap_unload(void);

#else
#define efi_init()
#endif /* CONFIG_EFI */

/* arch specific definitions used by the stub code */

#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)

/*
 * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
 * so we will reserve that amount of memory. We have no easy way to tell what
 * the actual size of code + data the uncompressed kernel will use.
 * If this is insufficient, the decompressor will relocate itself out of the
 * way before performing the decompression.
 */
#define MAX_UNCOMP_KERNEL_SIZE	SZ_32M

/*
 * The kernel zImage should preferably be located between 32 MB and 128 MB
 * from the base of DRAM. The min address leaves space for a maximal size
 * uncompressed image, and the max address is due to how the zImage decompressor
 * picks a destination address.
 */
#define ZIMAGE_OFFSET_LIMIT	SZ_128M
#define MIN_ZIMAGE_OFFSET	MAX_UNCOMP_KERNEL_SIZE
#define MAX_FDT_OFFSET		ZIMAGE_OFFSET_LIMIT

#endif /* _ASM_ARM_EFI_H */
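
A minimal sketch (editor's illustration, not part of the commit) of how a caller would go through the efi_call_virt() wrapper above: the wrapper switches to the EFI page tables via efi_virtmap_load(), looks up the service pointer in efi.systab->runtime, and switches back afterwards. The get_time service and efi_time_t come from include/linux/efi.h; the example function name is hypothetical.

/* Sketch only: read the firmware RTC through the efi_call_virt() wrapper. */
static efi_status_t example_read_rtc(efi_time_t *tm)
{
	efi_status_t status;

	/* wrapper loads/unloads the EFI virtual mappings around the call */
	status = efi_call_virt(get_time, tm, NULL);
	if (status != EFI_SUCCESS)
		pr_err("efi: GetTime() failed\n");
	return status;
}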
arch/arm/include/asm/fixmap.h
@@ -19,20 +19,47 @@ enum fixed_addresses {
 	FIX_TEXT_POKE0,
 	FIX_TEXT_POKE1,
-	__end_of_fixed_addresses
+	__end_of_fixmap_region,
+
+	/*
+	 * Share the kmap() region with early_ioremap(): this is guaranteed
+	 * not to clash since early_ioremap() is only available before
+	 * paging_init(), and kmap() only after.
+	 */
+#define NR_FIX_BTMAPS		32
+#define FIX_BTMAPS_SLOTS	7
+#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+	__end_of_early_ioremap_region
 };
 
+static const enum fixed_addresses __end_of_fixed_addresses =
+	__end_of_fixmap_region > __end_of_early_ioremap_region ?
+	__end_of_fixmap_region : __end_of_early_ioremap_region;
+
 #define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
 
 #define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
 #define FIXMAP_PAGE_RO		(FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)
 
 /* Used by set_fixmap_(io|nocache), both meant for mapping a device */
 #define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
 #define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
 
+#define __early_set_fixmap	__set_fixmap
+
+#ifdef CONFIG_MMU
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
 
+#else
+
+static inline void early_fixmap_init(void) { }
+
+#endif
 #endif
arch/arm/include/asm/mach/map.h
@@ -42,6 +42,8 @@ enum {
 extern void iotable_init(struct map_desc *, int);
 extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller);
+extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+				bool ng);
 
 #ifdef CONFIG_DEBUG_LL
 extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
arch/arm/include/asm/mmu_context.h
@@ -26,7 +26,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 #ifdef CONFIG_ARM_ERRATA_798181
 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
arch/arm/kernel/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
 obj-$(CONFIG_VDSO)		+= vdso.o
+obj-$(CONFIG_EFI)		+= efi.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
 obj-y		+= io.o
arch/arm/kernel/efi.c  (new file, 0 → 100644)

/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc desc = {
		.virtual	= md->virt_addr,
		.pfn		= __phys_to_pfn(md->phys_addr),
		.length		= md->num_pages * EFI_PAGE_SIZE,
	};

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
	if (md->attribute & EFI_MEMORY_WB)
		desc.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		desc.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		desc.type = MT_DEVICE_WC;
	else
		desc.type = MT_DEVICE;

	create_mapping_late(mm, &desc, true);
	return 0;
}
arch/arm/kernel/setup.c
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
@@ -37,7 +38,9 @@
 #include <asm/cp15.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/efi.h>
 #include <asm/elf.h>
+#include <asm/early_ioremap.h>
 #include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
@@ -1023,8 +1026,8 @@ void __init setup_arch(char **cmdline_p)
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 
-	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
-		early_fixmap_init();
+	early_fixmap_init();
+	early_ioremap_init();
 
 	parse_early_param();
@@ -1032,9 +1035,12 @@ void __init setup_arch(char **cmdline_p)
 	early_paging_init(mdesc);
 #endif
 	setup_dma_zone(mdesc);
+	efi_init();
 	sanity_check_meminfo();
 	arm_memblock_init(mdesc);
 
+	early_ioremap_reset();
+
 	paging_init(mdesc);
 	request_standard_resources(mdesc);
arch/arm/mm/init.c
@@ -192,7 +192,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(__pfn_to_phys(pfn));
+	return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
@@ -433,6 +433,9 @@ static void __init free_highpages(void)
 		if (end <= max_low)
 			continue;
 
+		if (memblock_is_nomap(mem))
+			continue;
+
 		/* Truncate partial highmem entries */
 		if (start < max_low)
 			start = max_low;
arch/arm/mm/ioremap.c
@@ -30,6 +30,7 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
+#include <asm/early_ioremap.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -469,3 +470,11 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_io);
 #endif
+
+/*
+ * Must be called after early_fixmap_init
+ */
+void __init early_ioremap_init(void)
+{
+	early_ioremap_setup();
+}
arch/arm/mm/mmu.c
@@ -390,7 +390,7 @@ void __init early_fixmap_init(void)
 	 * The early fixmap range spans multiple pmds, for which
 	 * we are not prepared:
 	 */
-	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
 		     != FIXADDR_TOP >> PMD_SHIFT);
 
 	pmd = fixmap_pmd(FIXADDR_TOP);
@@ -724,30 +724,49 @@ static void __init *early_alloc(unsigned long sz)
 	return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static void *__init late_alloc(unsigned long sz)
+{
+	void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
+
+	BUG_ON(!ptr);
+	return ptr;
+}
+
+static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+				unsigned long prot,
+				void *(*alloc)(unsigned long sz))
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+		pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
 }
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+				      unsigned long prot)
+{
+	return pte_alloc(pmd, addr, prot, early_alloc);
+}
+
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
-				  const struct mem_type *type)
+				  const struct mem_type *type,
+				  void *(*alloc)(unsigned long sz),
+				  bool ng)
 {
-	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+	pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
 	do {
-		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+			    ng ? PTE_EXT_NG : 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
-			const struct mem_type *type)
+			const struct mem_type *type, bool ng)
 {
 	pmd_t *p = pmd;
 
@@ -765,7 +784,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 	pmd++;
 #endif
 	do {
-		*pmd = __pmd(phys | type->prot_sect);
+		*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
 		phys += SECTION_SIZE;
 	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
@@ -774,7 +793,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+				      const struct mem_type *type,
+				      void *(*alloc)(unsigned long sz), bool ng)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 	unsigned long next;
@@ -792,10 +812,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		 */
 		if (type->prot_sect &&
 				((addr | next | phys) & ~SECTION_MASK) == 0) {
-			__map_init_section(pmd, addr, next, phys, type);
+			__map_init_section(pmd, addr, next, phys, type, ng);
 		} else {
 			alloc_init_pte(pmd, addr, next,
-						__phys_to_pfn(phys), type);
+				       __phys_to_pfn(phys), type, alloc, ng);
 		}
 
 		phys += next - addr;
@@ -805,21 +825,24 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 				  unsigned long end, phys_addr_t phys,
-				  const struct mem_type *type)
+				  const struct mem_type *type,
+				  void *(*alloc)(unsigned long sz), bool ng)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
 
 #ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
-					const struct mem_type *type)
+static void __init create_36bit_mapping(struct mm_struct *mm,
+					struct map_desc *md,
+					const struct mem_type *type,
+					bool ng)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
@@ -859,7 +882,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 */
 	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		pud_t *pud = pud_offset(pgd, addr);
@@ -867,7 +890,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
 		int i;
 
 		for (i = 0; i < 16; i++)
-			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
+				       (ng ? PMD_SECT_nG : 0));
 
 		addr += SUPERSECTION_SIZE;
 		phys += SUPERSECTION_SIZE;
@@ -876,33 +900,15 @@ static void __init create_36bit_mapping(struct map_desc *md,
 }
 #endif	/* !CONFIG_ARM_LPAE */
 
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'.  We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
+				    void *(*alloc)(unsigned long sz),
+				    bool ng)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
-	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-		return;
-	}
-
-	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
-	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-	}
-
 	type = &mem_types[md->type];
 
 #ifndef CONFIG_ARM_LPAE
@@ -910,7 +916,7 @@ static void __init create_mapping(struct map_desc *md)
 	 * Catch 36-bit addresses
 	 */
 	if (md->pfn >= 0x100000) {
-		create_36bit_mapping(md, type);
+		create_36bit_mapping(mm, md, type, ng);
 		return;
 	}
 #endif
@@ -925,18 +931,55 @@ static void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_pud(pgd, addr, next, phys, type);
+		alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
 
 		phys += next - addr;
 		addr = next;
 	} while (pgd++, addr != end);
 }
 
+/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'.  We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		return;
+	}
+
+	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+	}
+
+	__create_mapping(&init_mm, md, early_alloc, false);
+}
+
+void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+				bool ng)
+{
+#ifdef CONFIG_ARM_LPAE
+	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+	if (WARN_ON(!pud))
+		return;
+	pmd_alloc(mm, pud, 0);
+#endif
+	__create_mapping(mm, md, late_alloc, ng);
+}
+
 /*
  * Create the architecture specific mappings
  */
@@ -1392,6 +1435,9 @@ static void __init map_lowmem(void)
 		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
+		if (memblock_is_nomap(reg))
+			continue;
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
arch/arm64/include/asm/efi.h
@@ -2,7 +2,9 @@
 #define _ASM_EFI_H
 
 #include <asm/io.h>
+#include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
@@ -10,6 +12,8 @@ extern void efi_init(void);
 #define efi_init()
 #endif
 
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
 #define efi_call_virt(f, ...)						\
 ({									\
 	efi_##f##_t *__f;						\
@@ -63,6 +67,11 @@ extern void efi_init(void);
  * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
  */
 
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+	switch_mm(NULL, mm, NULL);
+}
+
 void efi_virtmap_load(void);
 void efi_virtmap_unload(void);
arch/arm64/kernel/efi.c
@@ -11,317 +11,34 @@
  *
  */
 
-#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
-#include <linux/export.h>
-#include <linux/memblock.h>
-#include <linux/mm_types.h>
-#include <linux/bootmem.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/preempt.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
 
-#include <asm/cacheflush.h>
 #include <asm/efi.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
 
-struct efi_memory_map memmap;
-
-static u64 efi_system_table;
-
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
-	.mm_rb			= RB_ROOT,
-	.pgd			= efi_pgd,
-	.mm_users		= ATOMIC_INIT(2),
-	.mm_count		= ATOMIC_INIT(1),
-	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
-	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
-	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
-};
-
-static int __init is_normal_ram(efi_memory_desc_t *md)
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
-	if (md->attribute & EFI_MEMORY_WB)
-		return 1;
-	return 0;
-}
-
-/*
- * Translate a EFI virtual address into a physical address: this is necessary,
- * as some data members of the EFI system table are virtually remapped after
- * SetVirtualAddressMap() has been called.
- */
-static phys_addr_t efi_to_phys(unsigned long addr)
-{
-	efi_memory_desc_t *md;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (md->virt_addr == 0)
-			/* no virtual mapping has been installed by the stub */
-			break;
-		if (md->virt_addr <= addr &&
-		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
-			return md->phys_addr + addr - md->virt_addr;
-	}
-	return addr;
-}
-
-static int __init uefi_init(void)
-{
-	efi_char16_t *c16;
-	void *config_tables;
-	u64 table_size;
-	char vendor[100] = "unknown";
-	int i, retval;
-
-	efi.systab = early_memremap(efi_system_table,
-				    sizeof(efi_system_table_t));
-	if (efi.systab == NULL) {
-		pr_warn("Unable to map EFI system table.\n");
-		return -ENOMEM;
-	}
-
-	set_bit(EFI_BOOT, &efi.flags);
-	set_bit(EFI_64BIT, &efi.flags);
-
-	/*
-	 * Verify the EFI Table
-	 */
-	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
-		pr_err("System table signature incorrect\n");
-		retval = -EINVAL;
-		goto out;
-	}
-	if ((efi.systab->hdr.revision >> 16) < 2)
-		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
-			efi.systab->hdr.revision >> 16,
-			efi.systab->hdr.revision & 0xffff);
-
-	/* Show what we know for posterity */
-	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-			     sizeof(vendor) * sizeof(efi_char16_t));
-	if (c16) {
-		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
-			vendor[i] = c16[i];
-		vendor[i] = '\0';
-		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
-	}
-
-	pr_info("EFI v%u.%.02u by %s\n",
-		efi.systab->hdr.revision >> 16,
-		efi.systab->hdr.revision & 0xffff, vendor);
-
-	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
-	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
-				       table_size);
-	if (config_tables == NULL) {
-		pr_warn("Unable to map EFI config table array.\n");
-		retval = -ENOMEM;
-		goto out;
-	}
-	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
-					 sizeof(efi_config_table_64_t), NULL);
-
-	early_memunmap(config_tables, table_size);
-out:
-	early_memunmap(efi.systab,  sizeof(efi_system_table_t));
-	return retval;
-}
-
-/*
- * Return true for RAM regions we want to permanently reserve.
- */
-static __init int is_reserve_region(efi_memory_desc_t *md)
-{
-	switch (md->type) {
-	case EFI_LOADER_CODE:
-	case EFI_LOADER_DATA:
-	case EFI_BOOT_SERVICES_CODE:
-	case EFI_BOOT_SERVICES_DATA:
-	case EFI_CONVENTIONAL_MEMORY:
-	case EFI_PERSISTENT_MEMORY:
-		return 0;
-	default:
-		break;
-	}
-	return is_normal_ram(md);
-}
-
-static __init void reserve_regions(void)
-{
-	efi_memory_desc_t *md;
-	u64 paddr, npages, size;
-
-	if (efi_enabled(EFI_DBG))
-		pr_info("Processing EFI memory map:\n");
-
-	for_each_efi_memory_desc(&memmap, md) {
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-
-		if (efi_enabled(EFI_DBG)) {
-			char buf[64];
-
-			pr_info("  0x%012llx-0x%012llx %s",
-				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-				efi_md_typeattr_format(buf, sizeof(buf), md));
-		}
-
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
-		if (is_normal_ram(md))
-			early_init_dt_add_memory_arch(paddr, size);
-
-		if (is_reserve_region(md)) {
-			memblock_reserve(paddr, size);
-			if (efi_enabled(EFI_DBG))
-				pr_cont("*");
-		}
-
-		if (efi_enabled(EFI_DBG))
-			pr_cont("\n");
-	}
-
-	set_bit(EFI_MEMMAP, &efi.flags);
-}
-
-void __init efi_init(void)
-{
-	struct efi_fdt_params params;
-
-	/* Grab UEFI information placed in FDT by stub */
-	if (!efi_get_fdt_params(&params))
-		return;
-
-	efi_system_table = params.system_table;
-
-	memblock_reserve(params.mmap & PAGE_MASK,
-			 PAGE_ALIGN(params.mmap_size +
-				    (params.mmap & ~PAGE_MASK)));
-	memmap.phys_map = params.mmap;
-	memmap.map = early_memremap(params.mmap, params.mmap_size);
-	if (memmap.map == NULL) {
-		/*
-		* If we are booting via UEFI, the UEFI memory map is the only
-		* description of memory we have, so there is little point in
-		* proceeding if we cannot access it.
-		*/
-		panic("Unable to map EFI memory map.\n");
-	}
-	memmap.map_end = memmap.map + params.mmap_size;
-	memmap.desc_size = params.desc_size;
-	memmap.desc_version = params.desc_ver;
-
-	if (uefi_init() < 0)
-		return;
-
-	reserve_regions();
-	early_memunmap(memmap.map, params.mmap_size);
-}
-
-static bool __init efi_virtmap_init(void)
-{
-	efi_memory_desc_t *md;
-
-	init_new_context(NULL, &efi_mm);
-
-	for_each_efi_memory_desc(&memmap, md) {
-		pgprot_t prot;
-
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (md->virt_addr == 0)
-			return false;
-
-		pr_info("  EFI remap 0x%016llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
+	pteval_t prot_val;
 
 	/*
 	 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 	 * executable, everything else can be mapped with the XN bits
 	 * set.
 	 */
-		if (!is_normal_ram(md))
-			prot = __pgprot(PROT_DEVICE_nGnRE);
+	if ((md->attribute & EFI_MEMORY_WB) == 0)
+		prot_val = PROT_DEVICE_nGnRE;
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-			 !PAGE_ALIGNED(md->phys_addr))
+	else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+		 !PAGE_ALIGNED(md->phys_addr))
-			prot = PAGE_KERNEL_EXEC;
+		prot_val = pgprot_val(PAGE_KERNEL_EXEC);
 	else
-			prot = PAGE_KERNEL;
+		prot_val = pgprot_val(PAGE_KERNEL);
 
-		create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 			   md->num_pages << EFI_PAGE_SHIFT,
-				   __pgprot(pgprot_val(prot) | PTE_NG));
-	}
-	return true;
-}
-
-/*
- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
- * non-early mapping of the UEFI system table and virtual mappings for all
- * EFI_MEMORY_RUNTIME regions.
- */
-static int __init arm64_enable_runtime_services(void)
-{
-	u64 mapsize;
-
-	if (!efi_enabled(EFI_BOOT)) {
-		pr_info("EFI services will not be available.\n");
-		return 0;
-	}
-
-	if (efi_runtime_disabled()) {
-		pr_info("EFI runtime services will be disabled.\n");
-		return 0;
-	}
-
-	pr_info("Remapping and enabling EFI services.\n");
-
-	mapsize = memmap.map_end - memmap.map;
-	memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
-						   mapsize);
-	if (!memmap.map) {
-		pr_err("Failed to remap EFI memory map\n");
-		return -ENOMEM;
-	}
-	memmap.map_end = memmap.map + mapsize;
-	efi.memmap = &memmap;
-
-	efi.systab = (__force void *)ioremap_cache(efi_system_table,
-						   sizeof(efi_system_table_t));
-	if (!efi.systab) {
-		pr_err("Failed to remap EFI System Table\n");
-		return -ENOMEM;
-	}
-	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
-	if (!efi_virtmap_init()) {
-		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-		return -ENOMEM;
-	}
-
-	/* Set up runtime services function pointers */
-	efi_native_runtime_setup();
-	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-
-	efi.runtime_version = efi.systab->hdr.revision;
-
+			   __pgprot(prot_val | PTE_NG));
 	return 0;
 }
-early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -337,23 +54,6 @@ static int __init arm64_dmi_init(void)
 }
 core_initcall(arm64_dmi_init);
 
-static void efi_set_pgd(struct mm_struct *mm)
-{
-	switch_mm(NULL, mm, NULL);
-}
-
-void efi_virtmap_load(void)
-{
-	preempt_disable();
-	efi_set_pgd(&efi_mm);
-}
-
-void efi_virtmap_unload(void)
-{
-	efi_set_pgd(current->active_mm);
-	preempt_enable();
-}
-
 /*
  * UpdateCapsule() depends on the system being shutdown via
  * ResetSystem().
arch/arm64/mm/init.c
@@ -120,7 +120,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return memblock_is_map_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
arch/arm64/mm/mmu.c
@@ -372,6 +372,8 @@ static void __init map_mem(void)
 
 		if (start >= end)
 			break;
+		if (memblock_is_nomap(reg))
+			continue;
 
 		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
 			/*
drivers/firmware/efi/Makefile
@@ -18,3 +18,7 @@ obj-$(CONFIG_EFI_RUNTIME_MAP)		+= runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)	+= runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)			+= libstub/
 obj-$(CONFIG_EFI_FAKE_MEMMAP)		+= fake_mem.o
+
+arm-obj-$(CONFIG_EFI)			:= arm-init.o arm-runtime.o
+obj-$(CONFIG_ARM)			+= $(arm-obj-y)
+obj-$(CONFIG_ARM64)			+= $(arm-obj-y)
drivers/firmware/efi/arm-init.c  (new file, 0 → 100644)

/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013 - 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>

#include <asm/efi.h>

struct efi_memory_map memmap;

u64 efi_system_table;

static int __init is_normal_ram(efi_memory_desc_t *md)
{
	if (md->attribute & EFI_MEMORY_WB)
		return 1;
	return 0;
}

/*
 * Translate a EFI virtual address into a physical address: this is necessary,
 * as some data members of the EFI system table are virtually remapped after
 * SetVirtualAddressMap() has been called.
 */
static phys_addr_t efi_to_phys(unsigned long addr)
{
	efi_memory_desc_t *md;

	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (md->virt_addr == 0)
			/* no virtual mapping has been installed by the stub */
			break;
		if (md->virt_addr <= addr &&
		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
			return md->phys_addr + addr - md->virt_addr;
	}
	return addr;
}

static int __init uefi_init(void)
{
	efi_char16_t *c16;
	void *config_tables;
	size_t table_size;
	char vendor[100] = "unknown";
	int i, retval;

	efi.systab = early_memremap(efi_system_table,
				    sizeof(efi_system_table_t));
	if (efi.systab == NULL) {
		pr_warn("Unable to map EFI system table.\n");
		return -ENOMEM;
	}

	set_bit(EFI_BOOT, &efi.flags);
	if (IS_ENABLED(CONFIG_64BIT))
		set_bit(EFI_64BIT, &efi.flags);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect\n");
		retval = -EINVAL;
		goto out;
	}
	if ((efi.systab->hdr.revision >> 16) < 2)
		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
			efi.systab->hdr.revision >> 16,
			efi.systab->hdr.revision & 0xffff);

	/* Show what we know for posterity */
	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
			     sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
				       table_size);
	if (config_tables == NULL) {
		pr_warn("Unable to map EFI config table array.\n");
		retval = -ENOMEM;
		goto out;
	}
	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
					 sizeof(efi_config_table_t), NULL);

	early_memunmap(config_tables, table_size);
out:
	early_memunmap(efi.systab,  sizeof(efi_system_table_t));
	return retval;
}

/*
 * Return true for RAM regions we want to permanently reserve.
 */
static __init int is_reserve_region(efi_memory_desc_t *md)
{
	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
	case EFI_PERSISTENT_MEMORY:
		return 0;
	default:
		break;
	}
	return is_normal_ram(md);
}

static __init void reserve_regions(void)
{
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	if (efi_enabled(EFI_DBG))
		pr_info("Processing EFI memory map:\n");

	for_each_efi_memory_desc(&memmap, md) {
		paddr = md->phys_addr;
		npages = md->num_pages;

		if (efi_enabled(EFI_DBG)) {
			char buf[64];

			pr_info("  0x%012llx-0x%012llx %s",
				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
				efi_md_typeattr_format(buf, sizeof(buf), md));
		}

		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (is_normal_ram(md))
			early_init_dt_add_memory_arch(paddr, size);

		if (is_reserve_region(md)) {
			memblock_mark_nomap(paddr, size);
			if (efi_enabled(EFI_DBG))
				pr_cont("*");
		}

		if (efi_enabled(EFI_DBG))
			pr_cont("\n");
	}

	set_bit(EFI_MEMMAP, &efi.flags);
}

void __init efi_init(void)
{
	struct efi_fdt_params params;

	/* Grab UEFI information placed in FDT by stub */
	if (!efi_get_fdt_params(&params))
		return;

	efi_system_table = params.system_table;

	memmap.phys_map = params.mmap;
	memmap.map = early_memremap(params.mmap, params.mmap_size);
	if (memmap.map == NULL) {
		/*
		* If we are booting via UEFI, the UEFI memory map is the only
		* description of memory we have, so there is little point in
		* proceeding if we cannot access it.
		*/
		panic("Unable to map EFI memory map.\n");
	}
	memmap.map_end = memmap.map + params.mmap_size;
	memmap.desc_size = params.desc_size;
	memmap.desc_version = params.desc_ver;

	if (uefi_init() < 0)
		return;

	reserve_regions();
	early_memunmap(memmap.map, params.mmap_size);
	memblock_mark_nomap(params.mmap & PAGE_MASK,
			    PAGE_ALIGN(params.mmap_size +
				       (params.mmap & ~PAGE_MASK)));
}
drivers/firmware/efi/arm-runtime.c  (new file, 0 → 100644)

/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/efi.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

extern u64 efi_system_table;

static struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
};

static bool __init efi_virtmap_init(void)
{
	efi_memory_desc_t *md;

	efi_mm.pgd = pgd_alloc(&efi_mm);
	init_new_context(NULL, &efi_mm);

	for_each_efi_memory_desc(&memmap, md) {
		phys_addr_t phys = md->phys_addr;
		int ret;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (md->virt_addr == 0)
			return false;

		ret = efi_create_mapping(&efi_mm, md);
		if  (!ret) {
			pr_info("  EFI remap %pa => %p\n",
				&phys, (void *)(unsigned long)md->virt_addr);
		} else {
			pr_warn("  EFI remap %pa: failed to create mapping (%d)\n",
				&phys, ret);
			return false;
		}
	}
	return true;
}

/*
 * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
 * non-early mapping of the UEFI system table and virtual mappings for all
 * EFI_MEMORY_RUNTIME regions.
 */
static int __init arm_enable_runtime_services(void)
{
	u64 mapsize;

	if (!efi_enabled(EFI_BOOT)) {
		pr_info("EFI services will not be available.\n");
		return 0;
	}

	if (efi_runtime_disabled()) {
		pr_info("EFI runtime services will be disabled.\n");
		return 0;
	}

	pr_info("Remapping and enabling EFI services.\n");

	mapsize = memmap.map_end - memmap.map;
	memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
						   mapsize);
	if (!memmap.map) {
		pr_err("Failed to remap EFI memory map\n");
		return -ENOMEM;
	}
	memmap.map_end = memmap.map + mapsize;
	efi.memmap = &memmap;

	efi.systab = (__force void *)ioremap_cache(efi_system_table,
						   sizeof(efi_system_table_t));
	if (!efi.systab) {
		pr_err("Failed to remap EFI System Table\n");
		return -ENOMEM;
	}
	set_bit(EFI_SYSTEM_TABLES, &efi.flags);

	if (!efi_virtmap_init()) {
		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
		return -ENOMEM;
	}

	/* Set up runtime services function pointers */
	efi_native_runtime_setup();
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	efi.runtime_version = efi.systab->hdr.revision;

	return 0;
}
early_initcall(arm_enable_runtime_services);

void efi_virtmap_load(void)
{
	preempt_disable();
	efi_set_pgd(&efi_mm);
}

void efi_virtmap_unload(void)
{
	efi_set_pgd(current->active_mm);
	preempt_enable();
}
drivers/firmware/efi/efi.c
@@ -25,6 +25,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 
+#include <asm/efi.h>
+
 struct efi __read_mostly efi = {
 	.mps			= EFI_INVALID_TABLE_ADDR,
 	.acpi			= EFI_INVALID_TABLE_ADDR,
drivers/firmware/efi/libstub/Makefile
@@ -34,6 +34,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
 lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o \
 				   $(patsubst %.c,lib-%.o,$(arm-deps))
 
+lib-$(CONFIG_ARM)		+= arm32-stub.o
 lib-$(CONFIG_ARM64)		+= arm64-stub.o
 CFLAGS_arm64-stub.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
@@ -67,3 +68,11 @@ quiet_cmd_stubcopy = STUBCPY $@
 		$(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y)	\
 		&& (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
 		    rm -f $@; /bin/false);			\
 	else /bin/false; fi
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub, which is preserved
+# explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM)	+= --rename-section .data=.data.efistub
+STUBCOPY_RELOC-$(CONFIG_ARM)	:= R_ARM_ABS
drivers/firmware/efi/libstub/arm-stub.c
@@ -303,8 +303,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
  * The value chosen is the largest non-zero power of 2 suitable for this purpose
  * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
  * be mapped efficiently.
+ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
+ * map everything below 1 GB.
  */
-#define EFI_RT_VIRTUAL_BASE	0x40000000
+#define EFI_RT_VIRTUAL_BASE	SZ_512M
 
 static int cmp_mem_desc(const void *l, const void *r)
 {
drivers/firmware/efi/libstub/arm32-stub.c  (new file, 0 → 100644)

/*
 * Copyright (C) 2013 Linaro Ltd;  <roy.franz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/efi.h>
#include <asm/efi.h>

efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
				 unsigned long *image_addr,
				 unsigned long *image_size,
				 unsigned long *reserve_addr,
				 unsigned long *reserve_size,
				 unsigned long dram_base,
				 efi_loaded_image_t *image)
{
	unsigned long nr_pages;
	efi_status_t status;
	/* Use alloc_addr to translate between types */
	efi_physical_addr_t alloc_addr;

	/*
	 * Verify that the DRAM base address is compatible with the ARM
	 * boot protocol, which determines the base of DRAM by masking
	 * off the low 27 bits of the address at which the zImage is
	 * loaded. These assumptions are made by the decompressor,
	 * before any memory map is available.
	 */
	dram_base = round_up(dram_base, SZ_128M);

	/*
	 * Reserve memory for the uncompressed kernel image. This is
	 * all that prevents any future allocations from conflicting
	 * with the kernel. Since we can't tell from the compressed
	 * image how much DRAM the kernel actually uses (due to BSS
	 * size uncertainty) we allocate the maximum possible size.
	 * Do this very early, as prints can cause memory allocations
	 * that may conflict with this.
	 */
	alloc_addr = dram_base;
	*reserve_size = MAX_UNCOMP_KERNEL_SIZE;
	nr_pages = round_up(*reserve_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
	status = sys_table->boottime->allocate_pages(EFI_ALLOCATE_ADDRESS,
						     EFI_LOADER_DATA,
						     nr_pages, &alloc_addr);
	if (status != EFI_SUCCESS) {
		*reserve_size = 0;
		pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
		return status;
	}
	*reserve_addr = alloc_addr;

	/*
	 * Relocate the zImage, so that it appears in the lowest 128 MB
	 * memory window.
	 */
	*image_size = image->image_size;
	status = efi_relocate_kernel(sys_table, image_addr, *image_size,
				     *image_size,
				     dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
	if (status != EFI_SUCCESS) {
		pr_efi_err(sys_table, "Failed to relocate kernel.\n");
		efi_free(sys_table, *reserve_size, *reserve_addr);
		*reserve_size = 0;
		return status;
	}

	/*
	 * Check to see if we were able to allocate memory low enough
	 * in memory. The kernel determines the base of DRAM from the
	 * address at which the zImage is loaded.
	 */
	if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
		pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
		efi_free(sys_table, *reserve_size, *reserve_addr);
		*reserve_size = 0;
		efi_free(sys_table, *image_size, *image_addr);
		*image_size = 0;
		return EFI_LOAD_ERROR;
	}
	return EFI_SUCCESS;
}
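
To make the rounding above concrete: the decompressor derives "start of DRAM" by clearing the low 27 bits (128 MB) of the address it was loaded at, which is why the stub rounds dram_base up to a 128 MB boundary and keeps the zImage within ZIMAGE_OFFSET_LIMIT of it. The snippet below is an editor's illustration of that masking only; the function name is hypothetical and not part of the commit.

/* Illustration only: DRAM base as inferred from the zImage load address. */
#define SZ_128M_EXAMPLE		0x08000000UL	/* 2^27 */

static unsigned long dram_base_seen_by_zimage(unsigned long zimage_addr)
{
	return zimage_addr & ~(SZ_128M_EXAMPLE - 1);	/* mask off low 27 bits */
}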
include/linux/memblock.h
@@ -25,6 +25,7 @@ enum {
 	MEMBLOCK_NONE		= 0x0,	/* No special request */
 	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
 	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
+	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
 };
 
 struct memblock_region {
@@ -82,6 +83,7 @@ bool memblock_overlaps_region(struct memblock_type *type,
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 ulong choose_memblock_flags(void);
 
 /* Low level functions */
@@ -184,6 +186,11 @@ static inline bool memblock_is_mirror(struct memblock_region *m)
 	return m->flags & MEMBLOCK_MIRROR;
 }
 
+static inline bool memblock_is_nomap(struct memblock_region *m)
+{
+	return m->flags & MEMBLOCK_NOMAP;
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long  *end_pfn);
@@ -319,6 +326,7 @@ phys_addr_t memblock_start_of_DRAM(void);
 phys_addr_t memblock_end_of_DRAM(void);
 void memblock_enforce_memory_limit(phys_addr_t memory_limit);
 int memblock_is_memory(phys_addr_t addr);
+int memblock_is_map_memory(phys_addr_t addr);
 int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
 int memblock_is_reserved(phys_addr_t addr);
 bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
mm/memblock.c
@@ -822,6 +822,17 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 }
 
+/**
+ * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
+{
+	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+}
 
 /**
  * __next_reserved_mem_region - next function for for_each_reserved_region()
@@ -913,6 +924,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1022,6 +1037,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 			continue;
 
+		/* skip nomap memory unless we were asked for it explicitly */
+		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+			continue;
+
 		if (!type_b) {
 			if (out_start)
 				*out_start = m_start;
@@ -1519,6 +1538,15 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
+int __init_memblock memblock_is_map_memory(phys_addr_t addr)
+{
+	int i = memblock_search(&memblock.memory, addr);
+
+	if (i == -1)
+		return false;
+	return !memblock_is_nomap(&memblock.memory.regions[i]);
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
 			 unsigned long *start_pfn, unsigned long *end_pfn)
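
A sketch of the usage pattern this flag enables (editor's illustration, assumptions noted in the comments; the helper name is hypothetical): RAM that firmware keeps for itself, such as the EFI regions handled by drivers/firmware/efi/arm-init.c above, stays registered with memblock — so pfn_valid() and the memory map still see it — but is marked NOMAP so it is skipped by the range iterators and never enters the kernel's linear mapping.

/* Sketch only: register firmware-owned RAM without mapping it. */
static void __init example_register_firmware_ram(phys_addr_t base,
						 phys_addr_t size)
{
	memblock_add(base, size);		/* still visible as memory */
	memblock_mark_nomap(base, size);	/* kept out of the direct map */
}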