Commit 81a63001 authored Oct 06, 2016 by Russell King

Merge branch 'devel-stable' into for-linus

parents 301a36fa 32b63776
Showing 3 changed files with 147 additions and 105 deletions (+147 -105):

    arch/arm/include/asm/module.h    +2   -4
    arch/arm/kernel/module-plts.c    +144 -99
    arch/arm/kernel/module.lds       +1   -2
arch/arm/include/asm/module.h

@@ -23,10 +23,8 @@ struct mod_arch_specific {
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr   *core_plt;
-	struct elf32_shdr   *init_plt;
-	int		    core_plt_count;
-	int		    init_plt_count;
+	struct elf32_shdr   *plt;
+	int		    plt_count;
 #endif
 };
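Two section pointers and two counters collapse into a single pair here because the rewritten allocator in arch/arm/kernel/module-plts.c (next file) treats the whole PLT as one flat sequence of grouped entries, addressed by plt_count alone. A minimal userspace sketch of that counter-to-slot arithmetic, assuming a hypothetical PLT_ENT_COUNT of 4:

#include <stdio.h>

#define PLT_ENT_COUNT	4	/* assumption for illustration only */

int main(void)
{
	int plt_count;

	for (plt_count = 1; plt_count <= 9; plt_count++) {
		/* the most recently allocated entry lives in group g, slot idx,
		 * exactly as computed at the top of get_module_plt() below */
		int g   = (plt_count - 1) / PLT_ENT_COUNT;
		int idx = (plt_count - 1) % PLT_ENT_COUNT;

		printf("plt_count=%d -> plt_entries[%d].ldr[%d]\n",
		       plt_count, g, idx);
	}
	return 0;
}

Because entries are handed out strictly in order, and the relocations feeding the allocator are pre-sorted, a lookup only ever has to examine the most recently allocated slot; that is what makes a single counter sufficient.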
arch/arm/kernel/module-plts.c

@@ -9,6 +9,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
 #include <asm/cache.h>
 #include <asm/opcodes.h>
@@ -30,154 +31,198 @@ struct plt_entries {
 	u32	lit[PLT_ENT_COUNT];
 };
 
-static bool in_init(const struct module *mod, u32 addr)
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
-	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
-}
+	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	int idx = 0;
+
+	/*
+	 * Look for an existing entry pointing to 'val'. Given that the
+	 * relocations are sorted, this will be the last entry we allocated.
+	 * (if one exists).
+	 */
+	if (mod->arch.plt_count > 0) {
+		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+
+		if (plt->lit[idx] == val)
+			return (u32)&plt->ldr[idx];
+
+		idx = (idx + 1) % PLT_ENT_COUNT;
+		if (!idx)
+			plt++;
+	}
+
+	mod->arch.plt_count++;
+	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+
+	if (!idx)
+		/* Populate a new set of entries */
+		*plt = (struct plt_entries){
+			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
+			{ val, }
+		};
+	else
+		plt->lit[idx] = val;
 
-u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+	return (u32)&plt->ldr[idx];
+}
+
+#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rel(const void *a, const void *b)
 {
-	struct plt_entries *plt, *plt_end;
-	int c, *count;
+	const Elf32_Rel *x = a, *y = b;
+	int i;
 
-	if (in_init(mod, loc)) {
-		plt = (void *)mod->arch.init_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.init_plt->sh_size;
-		count = &mod->arch.init_plt_count;
-	} else {
-		plt = (void *)mod->arch.core_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.core_plt->sh_size;
-		count = &mod->arch.core_plt_count;
-	}
+	/* sort by type and symbol index */
+	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
+	if (i == 0)
+		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
+	return i;
+}
 
-	/* Look for an existing entry pointing to 'val' */
-	for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
-		int i;
+static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
+{
+	u32 *tval = (u32 *)(base + rel->r_offset);
 
-		if (!c) {
-			/* Populate a new set of entries */
-			*plt = (struct plt_entries){
-				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
-				{ val, }
-			};
-			++*count;
-			return (u32)plt->ldr;
-		}
-		for (i = 0; i < PLT_ENT_COUNT; i++) {
-			if (!plt->lit[i]) {
-				plt->lit[i] = val;
-				++*count;
-			}
+	/*
+	 * Do a bitwise compare on the raw addend rather than fully decoding
+	 * the offset and doing an arithmetic comparison.
+	 * Note that a zero-addend jump/call relocation is encoded taking the
+	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
+	 */
+	switch (ELF32_R_TYPE(rel->r_info)) {
+		u16 upper, lower;
 
-			if (plt->lit[i] == val)
-				return (u32)&plt->ldr[i];
-		}
-	}
+	case R_ARM_THM_CALL:
+	case R_ARM_THM_JUMP24:
+		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
+		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
+
+		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
+
+	case R_ARM_CALL:
+	case R_ARM_PC24:
+	case R_ARM_JUMP24:
+		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
+	}
 	BUG();
 }
 
-static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
-			 u32 mask)
+static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
 {
-	u32 *loc1, *loc2;
-	int i;
+	const Elf32_Rel *prev;
 
-	for (i = 0; i < num; i++) {
-		if (rel[i].r_info != rel[num].r_info)
-			continue;
+	/*
+	 * Entries are sorted by type and symbol index. That means that,
+	 * if a duplicate entry exists, it must be in the preceding
+	 * slot.
+	 */
+	if (!num)
+		return false;
 
-		/*
-		 * Identical relocation types against identical symbols can
-		 * still result in different PLT entries if the addend in the
-		 * place is different. So resolve the target of the relocation
-		 * to compare the values.
-		 */
-		loc1 = (u32 *)(base + rel[i].r_offset);
-		loc2 = (u32 *)(base + rel[num].r_offset);
-		if (((*loc1 ^ *loc2) & mask) == 0)
-			return 1;
-	}
-	return 0;
+	prev = rel + num - 1;
+	return cmp_rel(rel + num, prev) == 0 &&
+	       is_zero_addend_relocation(base, prev);
 }
 
 /* Count how many PLT entries we may need */
-static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+			       const Elf32_Rel *rel, int num)
 {
 	unsigned int ret = 0;
+	const Elf32_Sym *s;
 	int i;
 
-	/*
-	 * Sure, this is order(n^2), but it's usually short, and not
-	 * time critical
-	 */
-	for (i = 0; i < num; i++)
+	for (i = 0; i < num; i++) {
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_ARM_CALL:
 		case R_ARM_PC24:
 		case R_ARM_JUMP24:
-			if (!duplicate_rel(base, rel, i,
-					   __opcode_to_mem_arm(0x00ffffff)))
-				ret++;
-			break;
-#ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
-			if (!duplicate_rel(base, rel, i,
-					   __opcode_to_mem_thumb32(0x07ff2fff)))
+			/*
+			 * We only have to consider branch targets that resolve
+			 * to undefined symbols. This is not simply a heuristic,
+			 * it is a fundamental limitation, since the PLT itself
+			 * is part of the module, and needs to be within range
+			 * as well, so modules can never grow beyond that limit.
+			 */
+			s = syms + ELF32_R_SYM(rel[i].r_info);
+			if (s->st_shndx != SHN_UNDEF)
+				break;
+
+			/*
+			 * Jump relocations with non-zero addends against
+			 * undefined symbols are supported by the ELF spec, but
+			 * do not occur in practice (e.g., 'jump n bytes past
+			 * the entry point of undefined function symbol f').
+			 * So we need to support them, but there is no need to
+			 * take them into consideration when trying to optimize
+			 * this code. So let's only check for duplicates when
+			 * the addend is zero.
+			 */
+			if (!is_zero_addend_relocation(base, rel + i) ||
+			    !duplicate_rel(base, rel, i))
 				ret++;
-#endif
 		}
+	}
 	return ret;
 }
 
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long core_plts = 0, init_plts = 0;
+	unsigned long plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+	Elf32_Sym *syms = NULL;
 
 	/*
 	 * To store the PLTs, we expand the .text section for core module code
-	 * and the .init.text section for initialization code.
+	 * and for initialization code.
 	 */
-	for (s = sechdrs; s < sechdrs_end; ++s)
-		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
-			mod->arch.core_plt = s;
-		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
-			mod->arch.init_plt = s;
+	for (s = sechdrs; s < sechdrs_end; ++s) {
+		if (strcmp(".plt", secstrings + s->sh_name) == 0)
+			mod->arch.plt = s;
+		else if (s->sh_type == SHT_SYMTAB)
+			syms = (Elf32_Sym *)s->sh_addr;
+	}
 
-	if (!mod->arch.core_plt || !mod->arch.init_plt) {
-		pr_err("%s: sections missing\n", mod->name);
+	if (!mod->arch.plt) {
+		pr_err("%s: module PLT section missing\n", mod->name);
+		return -ENOEXEC;
+	}
+	if (!syms) {
+		pr_err("%s: module symtab section missing\n", mod->name);
 		return -ENOEXEC;
 	}
 
 	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
-		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
+		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
 		int numrels = s->sh_size / sizeof(Elf32_Rel);
 		Elf32_Shdr *dstsec = sechdrs + s->sh_info;
 
 		if (s->sh_type != SHT_REL)
 			continue;
 
-		if (strstr(secstrings + s->sh_name, ".init"))
-			init_plts += count_plts(dstsec->sh_addr, rels, numrels);
-		else
-			core_plts += count_plts(dstsec->sh_addr, rels, numrels);
+		/* ignore relocations that operate on non-exec sections */
+		if (!(dstsec->sh_flags & SHF_EXECINSTR))
+			continue;
+
+		/* sort by type and symbol index */
+		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+
+		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
 	}
 
-	mod->arch.core_plt->sh_type = SHT_NOBITS;
-	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.core_plt_count = 0;
-
-	mod->arch.init_plt->sh_type = SHT_NOBITS;
-	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.init_plt_count = 0;
-	pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
-		 mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
+	mod->arch.plt->sh_type = SHT_NOBITS;
+	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+					  sizeof(struct plt_entries));
+	mod->arch.plt_count = 0;
+
+	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
 	return 0;
 }
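The heart of this rewrite is algorithmic: the old count_plts() compared each relocation against all earlier ones (the "order(n^2), but it's usually short" loop), while the new code first sorts each SHT_REL section by relocation type and symbol index, so a duplicate branch target can only occupy the immediately preceding slot. A self-contained sketch of that sort-then-compare-adjacent technique, using a simplified stand-in for Elf32_Rel and libc qsort() in place of the kernel's sort():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-in for Elf32_Rel: per the ELF spec, r_info packs the
 * symbol index (high 24 bits) and relocation type (low 8 bits). */
struct rel {
	uint32_t r_info;
};

#define R_TYPE(info)	((info) & 0xff)
#define R_SYM(info)	((info) >> 8)

#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

/* sort by type, then by symbol index, mirroring cmp_rel() above */
static int cmp_rel(const void *a, const void *b)
{
	const struct rel *x = a, *y = b;
	int i;

	i = cmp_3way(R_TYPE(x->r_info), R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(R_SYM(x->r_info), R_SYM(y->r_info));
	return i;
}

/* After sorting, a duplicate can only be the immediately preceding entry,
 * so counting distinct targets becomes a single linear pass. */
static unsigned int count_distinct(struct rel *rel, int num)
{
	unsigned int ret = 0;
	int i;

	qsort(rel, num, sizeof(*rel), cmp_rel);
	for (i = 0; i < num; i++)
		if (i == 0 || cmp_rel(&rel[i], &rel[i - 1]) != 0)
			ret++;
	return ret;
}

int main(void)
{
	/* three branches to symbol 7 and two to symbol 3, all of type 28 */
	struct rel rels[] = {
		{ 28 | 7 << 8 }, { 28 | 3 << 8 }, { 28 | 7 << 8 },
		{ 28 | 3 << 8 }, { 28 | 7 << 8 },
	};

	printf("%u distinct PLT targets\n",
	       count_distinct(rels, sizeof(rels) / sizeof(rels[0])));
	return 0;	/* prints: 2 distinct PLT targets */
}

The real count_plts() additionally skips relocations against symbols defined within the module (those are in range by construction, as its comment notes) and only deduplicates zero-addend branches. The zero-addend check can work on raw encodings because the stored immediate is PC-biased: a zero addend in a 24-bit ARM branch encodes as (0 - 8) >> 2 = -2, i.e. 0xfffffe, which is exactly the constant tested in is_zero_addend_relocation().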
arch/arm/kernel/module.lds

@@ -1,4 +1,3 @@
 SECTIONS {
-	.core.plt : { BYTE(0) }
-	.init.plt : { BYTE(0) }
+	.plt : { BYTE(0) }
 }
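The BYTE(0) entry is only a placeholder that makes the section exist in the module template; module_frob_arch_sections() above retypes .plt to SHT_NOBITS and sizes it from the counted relocations. A toy calculation of that round_up() sizing, with all constants assumed for illustration (PLT_ENT_COUNT = 8 and PLT_ENT_SIZE = 8, so one struct plt_entries group of ldr[] plus lit[] words is 64 bytes):

#include <stdio.h>

/* All values assumed for illustration only. */
#define PLT_ENT_COUNT	8				/* entries per plt_entries group */
#define PLT_ENT_SIZE	8				/* bytes per entry: one ldr + one lit word */
#define GROUP_SIZE	(PLT_ENT_COUNT * PLT_ENT_SIZE)	/* sizeof(struct plt_entries) = 64 */

#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long plts = 5;	/* hypothetical result of count_plts() */
	unsigned long sh_size = round_up(plts * PLT_ENT_SIZE, GROUP_SIZE);

	/* 5 entries * 8 bytes = 40, rounded up to one full 64-byte group */
	printf("sh_size = %lu\n", sh_size);	/* prints: sh_size = 64 */
	return 0;
}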