Commit f8f238ff, authored Apr 18, 2023 by Andrew Morton
sync mm-stable with mm-hotfixes-stable to pick up depended-upon upstream changes
Parents: e492cd61, ef832747
Showing 11 changed files with 247 additions and 102 deletions (+247 -102):
fs/nilfs2/segment.c     +20   -0
include/linux/kmsan.h   +21  -18
kernel/fork.c            +1   -0
kernel/sys.c            +40  -29
lib/maple_tree.c        +24  -23
mm/kmsan/hooks.c        +47   -8
mm/kmsan/shadow.c       +18   -9
mm/mmap.c               +43   -5
mm/page_alloc.c         +19   -0
mm/vmalloc.c             +7   -3
tools/Makefile           +7   -7
fs/nilfs2/segment.c

@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
         return 0;
 }
 
+/**
+ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
+ * @sci: segment constructor object
+ *
+ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
+ * the current segment summary block.
+ */
+static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
+{
+        struct nilfs_segsum_pointer *ssp;
+
+        ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
+        if (ssp->offset < ssp->bh->b_size)
+                memset(ssp->bh->b_data + ssp->offset, 0,
+                       ssp->bh->b_size - ssp->offset);
+}
+
 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 {
         sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;

@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
                  * The current segment is filled up
                  * (internal code)
                  */
+                nilfs_segctor_zeropad_segsum(sci);
                 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
                 return nilfs_segctor_reset_segment_buffer(sci);
         }

@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                 goto retry;
         }
         if (unlikely(required)) {
+                nilfs_segctor_zeropad_segsum(sci);
                 err = nilfs_segbuf_extend_segsum(segbuf);
                 if (unlikely(err))
                         goto failed;

@@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
                 sci->sc_stage = prev_stage;
         }
+        nilfs_segctor_zeropad_segsum(sci);
         nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
         return 0;
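The kernel-doc above describes zero-filling whatever is left of the current segment summary block past the last written offset, so uninitialized buffer contents never reach disk. A minimal userspace sketch of that padding pattern (made-up buffer size and offset, not part of the commit):

#include <stdio.h>
#include <string.h>

/* Illustrative sketch only, not kernel code: zero-fill the unused tail of a
 * fixed-size block past the current write offset, so no stale bytes leak. */
static void zeropad_tail(unsigned char *block, size_t block_size, size_t offset)
{
        if (offset < block_size)
                memset(block + offset, 0, block_size - offset);
}

int main(void)
{
        unsigned char block[32];

        memset(block, 0xaa, sizeof(block));     /* pretend this is stale data */
        zeropad_tail(block, sizeof(block), 20); /* only 20 bytes were written */

        for (size_t i = 0; i < sizeof(block); i++)
                printf("%02x%c", block[i], (i + 1) % 16 ? ' ' : '\n');
        return 0;
}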
include/linux/kmsan.h

@@ -134,9 +134,10 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift: page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
                                     pgprot_t prot, struct page **pages,
                                     unsigned int page_shift);

@@ -159,9 +160,10 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * @page_shift: page_shift argument passed to vmap_range_noflush().
  *
  * KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success, callers must check for non-zero return
+ * value.
  */
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
                               phys_addr_t phys_addr, pgprot_t prot,
                               unsigned int page_shift);

@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
                                                   unsigned long end,
                                                   pgprot_t prot,
                                                   struct page **pages,
                                                   unsigned int page_shift)
 {
+        return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,

@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline void kmsan_ioremap_page_range(unsigned long start,
+static inline int kmsan_ioremap_page_range(unsigned long start,
                                             unsigned long end,
-                                            phys_addr_t phys_addr,
-                                            pgprot_t prot,
+                                            phys_addr_t phys_addr, pgprot_t prot,
                                             unsigned int page_shift)
 {
+        return 0;
 }
 
 static inline void kmsan_iounmap_page_range(unsigned long start,
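The updated comments state that both hooks now return 0 on success and that callers must check for a non-zero result (mm/vmalloc.c further down does exactly that). A tiny sketch of the caller-side contract (plain C, hypothetical helper names, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins: both report errors as negative errno, 0 on success. */
static int instrumentation_hook(int fail) { return fail ? -ENOMEM : 0; }
static int do_real_mapping(void) { return 0; }

static int map_range(int make_hook_fail)
{
        int ret = instrumentation_hook(make_hook_fail);

        if (ret)                /* propagate the hook's failure, skip the mapping */
                return ret;
        return do_real_mapping();
}

int main(void)
{
        printf("ok path: %d\n", map_range(0));
        printf("failure path: %d\n", map_range(1));
        return 0;
}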
kernel/fork.c

@@ -1308,6 +1308,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 fail_pcpu:
         while (i > 0)
                 percpu_counter_destroy(&mm->rss_stat[--i]);
+        destroy_context(mm);
 fail_nocontext:
         mm_free_pgd(mm);
 fail_nopgd:
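The one-line fix adds a release call that was missing from the unwind path of mm_init(). As a reminder of the idiom it patches, here is a self-contained sketch of kernel-style goto unwinding (illustrative only, invented resources):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch, not kernel code: each failure label releases exactly
 * what was acquired before the point of failure, in reverse order. */
static int init_object(int fail_third_step)
{
        void *a, *b, *c = NULL;

        a = malloc(16);
        if (!a)
                goto fail_nothing;
        b = malloc(16);
        if (!b)
                goto fail_a;
        if (!fail_third_step)
                c = malloc(16);
        if (!c)
                goto fail_b;    /* must also undo a and b, in reverse order */

        free(c);
        free(b);
        free(a);
        return 0;

fail_b:
        free(b);        /* the commit added the analogous missing release */
fail_a:
        free(a);
fail_nothing:
        return -1;
}

int main(void)
{
        printf("%d %d\n", init_object(0), init_object(1));
        return 0;
}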
kernel/sys.c

@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
         struct cred *new;
         int retval;
         kuid_t kruid, keuid, ksuid;
+        bool ruid_new, euid_new, suid_new;
 
         kruid = make_kuid(ns, ruid);
         keuid = make_kuid(ns, euid);

@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
         if ((suid != (uid_t) -1) && !uid_valid(ksuid))
                 return -EINVAL;
 
+        old = current_cred();
+
+        /* check for no-op */
+        if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
+            (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
+                                    uid_eq(keuid, old->fsuid))) &&
+            (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
+                return 0;
+
+        ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
+                   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
+        euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
+                   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
+        suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
+                   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
+        if ((ruid_new || euid_new || suid_new) &&
+            !ns_capable_setid(old->user_ns, CAP_SETUID))
+                return -EPERM;
+
         new = prepare_creds();
         if (!new)
                 return -ENOMEM;
-        old = current_cred();
-
-        retval = -EPERM;
-        if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
-                if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
-                    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
-                        goto error;
-                if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
-                    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
-                        goto error;
-                if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
-                    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
-                        goto error;
-        }
 
         if (ruid != (uid_t) -1) {
                 new->uid = kruid;
                 if (!uid_eq(kruid, old->uid)) {

@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
         struct cred *new;
         int retval;
         kgid_t krgid, kegid, ksgid;
+        bool rgid_new, egid_new, sgid_new;
 
         krgid = make_kgid(ns, rgid);
         kegid = make_kgid(ns, egid);

@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
         if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
                 return -EINVAL;
 
+        old = current_cred();
+
+        /* check for no-op */
+        if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
+            (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
+                                    gid_eq(kegid, old->fsgid))) &&
+            (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
+                return 0;
+
+        rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
+                   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
+        egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
+                   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
+        sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
+                   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
+        if ((rgid_new || egid_new || sgid_new) &&
+            !ns_capable_setid(old->user_ns, CAP_SETGID))
+                return -EPERM;
+
         new = prepare_creds();
         if (!new)
                 return -ENOMEM;
-        old = current_cred();
-
-        retval = -EPERM;
-        if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
-                if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
-                    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
-                        goto error;
-                if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
-                    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
-                        goto error;
-                if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
-                    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
-                        goto error;
-        }
 
         if (rgid != (gid_t) -1)
                 new->gid = krgid;
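The rewritten flow above decides everything before prepare_creds(): a call that changes nothing returns 0 immediately, and a call that would need CAP_SETUID/CAP_SETGID fails with -EPERM before any credentials are allocated. A rough standalone sketch of that ordering (plain integers instead of kuid_t/kgid_t, without the fsuid/fsgid detail, purely illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ids { int r, e, s; };

/* Illustrative sketch, not the syscall: -1 means "leave this id unchanged". */
static int set_ids(struct ids *cur, int r, int e, int s, bool capable)
{
        bool r_new, e_new, s_new;

        /* no-op: nothing would actually change, allow it for everyone */
        if ((r == -1 || r == cur->r) &&
            (e == -1 || e == cur->e) &&
            (s == -1 || s == cur->s))
                return 0;

        /* would any value move to something we do not already hold? */
        r_new = r != -1 && r != cur->r && r != cur->e && r != cur->s;
        e_new = e != -1 && e != cur->r && e != cur->e && e != cur->s;
        s_new = s != -1 && s != cur->r && s != cur->e && s != cur->s;
        if ((r_new || e_new || s_new) && !capable)
                return -EPERM;          /* fail before committing anything */

        if (r != -1) cur->r = r;
        if (e != -1) cur->e = e;
        if (s != -1) cur->s = s;
        return 0;
}

int main(void)
{
        struct ids me = { 1000, 1000, 1000 };

        printf("%d\n", set_ids(&me, -1, -1, -1, false));  /* 0: no-op */
        printf("%d\n", set_ids(&me, 0, 0, 0, false));      /* -EPERM */
        printf("%d\n", set_ids(&me, 0, 0, 0, true));       /* 0 */
        return 0;
}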
lib/maple_tree.c

@@ -4965,7 +4965,8 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
  * Return: True if found in a leaf, false otherwise.
  *
  */
-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
+                unsigned long *gap_min, unsigned long *gap_max)
 {
         enum maple_type type = mte_node_type(mas->node);
         struct maple_node *node = mas_mn(mas);

@@ -5030,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
         if (unlikely(ma_is_leaf(type))) {
                 mas->offset = offset;
-                mas->min = min;
-                mas->max = min + gap - 1;
+                *gap_min = min;
+                *gap_max = min + gap - 1;
                 return true;
         }

@@ -5055,10 +5056,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
 {
         enum maple_type type = mte_node_type(mas->node);
         unsigned long pivot, min, gap = 0;
-        unsigned char offset;
-        unsigned long *gaps;
-        unsigned long *pivots = ma_pivots(mas_mn(mas), type);
-        void __rcu **slots = ma_slots(mas_mn(mas), type);
+        unsigned char offset, data_end;
+        unsigned long *gaps, *pivots;
+        void __rcu **slots;
+        struct maple_node *node;
         bool found = false;
 
         if (ma_is_dense(type)) {

@@ -5066,13 +5067,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
                 return true;
         }
 
-        gaps = ma_gaps(mte_to_node(mas->node), type);
+        node = mas_mn(mas);
+        pivots = ma_pivots(node, type);
+        slots = ma_slots(node, type);
+        gaps = ma_gaps(node, type);
         offset = mas->offset;
         min = mas_safe_min(mas, pivots, offset);
-        for (; offset < mt_slots[type]; offset++) {
-                pivot = mas_safe_pivot(mas, pivots, offset, type);
-                if (offset && !pivot)
-                        break;
+        data_end = ma_data_end(node, type, pivots, mas->max);
+        for (; offset <= data_end; offset++) {
+                pivot = mas_logical_pivot(mas, pivots, offset, type);
 
                 /* Not within lower bounds */
                 if (mas->index > pivot)

@@ -5307,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
         unsigned long *pivots;
         enum maple_type mt;
 
+        if (min >= max)
+                return -EINVAL;
+
         if (mas_is_start(mas))
                 mas_start(mas);
         else if (mas->offset >= 2)

@@ -5361,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 {
         struct maple_enode *last = mas->node;
 
+        if (min >= max)
+                return -EINVAL;
+
         if (mas_is_start(mas)) {
                 mas_start(mas);
                 mas->offset = mas_data_end(mas);

@@ -5380,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
         mas->index = min;
         mas->last = max;
 
-        while (!mas_rev_awalk(mas, size)) {
+        while (!mas_rev_awalk(mas, size, &min, &max)) {
                 if (last == mas->node) {
                         if (!mas_rewind_node(mas))
                                 return -EBUSY;

@@ -5395,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
         if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
                 return -EBUSY;
 
-        /*
-         * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
-         * the maximum is outside the window we are searching, then use the last
-         * location in the search.
-         * mas->max and mas->min is the range of the gap.
-         * mas->index and mas->last are currently set to the search range.
-         */
-
         /* Trim the upper limit to the max. */
-        if (mas->max <= mas->last)
-                mas->last = mas->max;
+        if (max <= mas->last)
+                mas->last = max;
 
         mas->index = mas->last - size + 1;
         return 0;
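In the updated code mas_rev_awalk() reports the gap it found through the gap_min/gap_max out-parameters instead of overwriting mas->min and mas->max, which is what lets mas_empty_area_rev() keep using the maple state afterwards and trim against the caller's max. The general shape of that out-parameter pattern, in isolation (hypothetical names, not the maple tree API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch: report a found range via out-parameters instead of
 * mutating the search state, so the caller's cursor stays usable afterwards. */
static bool find_gap(const unsigned long *used, int n, unsigned long size,
                     unsigned long *gap_min, unsigned long *gap_max)
{
        for (int i = n - 1; i > 0; i--) {
                unsigned long lo = used[i - 1] + 1, hi = used[i] - 1;

                if (hi >= lo && hi - lo + 1 >= size) {
                        *gap_min = lo;
                        *gap_max = hi;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        unsigned long used[] = { 10, 20, 40, 41 };
        unsigned long lo, hi;

        if (find_gap(used, 4, 5, &lo, &hi))
                printf("gap: [%lu, %lu]\n", lo, hi);    /* prints [21, 39] */
        return 0;
}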
mm/kmsan/hooks.c

@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-                              phys_addr_t phys_addr, pgprot_t prot,
-                              unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+                             phys_addr_t phys_addr, pgprot_t prot,
+                             unsigned int page_shift)
 {
         gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
         struct page *shadow, *origin;
         unsigned long off = 0;
-        int nr;
+        int nr, err = 0, clean = 0, mapped;
 
         if (!kmsan_enabled || kmsan_in_runtime())
-                return;
+                return 0;
 
         nr = (end - start) / PAGE_SIZE;
         kmsan_enter_runtime();
-        for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+        for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
                 shadow = alloc_pages(gfp_mask, 1);
                 origin = alloc_pages(gfp_mask, 1);
-                __vmap_pages_range_noflush(
+                if (!shadow || !origin) {
+                        err = -ENOMEM;
+                        goto ret;
+                }
+                mapped = __vmap_pages_range_noflush(
                         vmalloc_shadow(start + off),
                         vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
                         PAGE_SHIFT);
-                __vmap_pages_range_noflush(
+                if (mapped) {
+                        err = mapped;
+                        goto ret;
+                }
+                shadow = NULL;
+                mapped = __vmap_pages_range_noflush(
                         vmalloc_origin(start + off),
                         vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
                         PAGE_SHIFT);
+                if (mapped) {
+                        __vunmap_range_noflush(
+                                vmalloc_shadow(start + off),
+                                vmalloc_shadow(start + off + PAGE_SIZE));
+                        err = mapped;
+                        goto ret;
+                }
+                origin = NULL;
         }
+        /* Page mapping loop finished normally, nothing to clean up. */
+        clean = 0;
+
+ret:
+        if (clean > 0) {
+                /*
+                 * Something went wrong. Clean up shadow/origin pages allocated
+                 * on the last loop iteration, then delete mappings created
+                 * during the previous iterations.
+                 */
+                if (shadow)
+                        __free_pages(shadow, 1);
+                if (origin)
+                        __free_pages(origin, 1);
+                __vunmap_range_noflush(
+                        vmalloc_shadow(start),
+                        vmalloc_shadow(start + clean * PAGE_SIZE));
+                __vunmap_range_noflush(
+                        vmalloc_origin(start),
+                        vmalloc_origin(start + clean * PAGE_SIZE));
+        }
         flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
         flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
         kmsan_leave_runtime();
+        return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
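The reworked kmsan_ioremap_page_range() records in clean how many loop iterations completed, so a mid-loop failure can free the pages from the failing pass and unmap only what earlier passes mapped. A compact userspace sketch of that partial-failure cleanup pattern (invented names, malloc() standing in for page allocation, and the cleanup slightly simplified to run unconditionally):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch, not kernel code: set up n slots; on failure, release
 * the allocation from the failing pass plus every fully completed pass. */
static int setup_all(void **slots, int n, int fail_at)
{
        int err = 0, clean = 0;
        void *cur = NULL;

        for (int i = 0; i < n; i++, clean = i) {
                cur = malloc(64);
                if (!cur || i == fail_at) {
                        err = -ENOMEM;
                        goto ret;
                }
                slots[i] = cur;
                cur = NULL;
        }
        clean = 0;      /* loop finished normally, nothing to undo */
ret:
        free(cur);                              /* leftover from the failing pass, if any */
        for (int i = 0; i < clean; i++)         /* unwind the passes that completed */
                free(slots[i]);
        return err;
}

int main(void)
{
        void *slots[8] = { 0 };

        printf("%d\n", setup_all(slots, 8, 5)); /* fails, unwinds 5 completed slots */
        return 0;
}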
mm/kmsan/shadow.c

@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
         kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                    pgprot_t prot, struct page **pages,
-                                    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                   pgprot_t prot, struct page **pages,
+                                   unsigned int page_shift)
 {
         unsigned long shadow_start, origin_start, shadow_end, origin_end;
         struct page **s_pages, **o_pages;
-        int nr, mapped;
+        int nr, mapped, err = 0;
 
         if (!kmsan_enabled)
-                return;
+                return 0;
 
         shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
         shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
         if (!shadow_start)
-                return;
+                return 0;
 
         nr = (end - start) / PAGE_SIZE;
         s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
         o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-        if (!s_pages || !o_pages)
+        if (!s_pages || !o_pages) {
+                err = -ENOMEM;
                 goto ret;
+        }
 
         for (int i = 0; i < nr; i++) {
                 s_pages[i] = shadow_page_for(pages[i]);
                 o_pages[i] = origin_page_for(pages[i]);

@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
         kmsan_enter_runtime();
         mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
                                             s_pages, page_shift);
-        KMSAN_WARN_ON(mapped);
+        if (mapped) {
+                err = mapped;
+                goto ret;
+        }
         mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
                                             o_pages, page_shift);
-        KMSAN_WARN_ON(mapped);
+        if (mapped) {
+                err = mapped;
+                goto ret;
+        }
         kmsan_leave_runtime();
 
         flush_tlb_kernel_range(shadow_start, shadow_end);
         flush_tlb_kernel_range(origin_start, origin_end);

@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
         kfree(s_pages);
         kfree(o_pages);
+        return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
mm/mmap.c

@@ -1547,7 +1547,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
  */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-        unsigned long length, gap;
+        unsigned long length, gap, low_limit;
+        struct vm_area_struct *tmp;
 
         MA_STATE(mas, &current->mm->mm_mt, 0, 0);

@@ -1556,12 +1557,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
         if (length < info->length)
                 return -ENOMEM;
 
-        if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
-                                  length))
+        low_limit = info->low_limit;
+retry:
+        if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
                 return -ENOMEM;
 
         gap = mas.index;
         gap += (info->align_offset - gap) & info->align_mask;
+        tmp = mas_next(&mas, ULONG_MAX);
+        if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+                if (vm_start_gap(tmp) < gap + length - 1) {
+                        low_limit = tmp->vm_end;
+                        mas_reset(&mas);
+                        goto retry;
+                }
+        } else {
+                tmp = mas_prev(&mas, 0);
+                if (tmp && vm_end_gap(tmp) > gap) {
+                        low_limit = vm_end_gap(tmp);
+                        mas_reset(&mas);
+                        goto retry;
+                }
+        }
+
         return gap;
 }

@@ -1577,7 +1595,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
  */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-        unsigned long length, gap;
+        unsigned long length, gap, high_limit, gap_end;
+        struct vm_area_struct *tmp;
 
         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
         /* Adjust search length to account for worst case alignment overhead */

@@ -1585,12 +1604,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
         if (length < info->length)
                 return -ENOMEM;
 
-        if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
+        high_limit = info->high_limit;
+retry:
+        if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
                                 length))
                 return -ENOMEM;
 
         gap = mas.last + 1 - info->length;
         gap -= (gap - info->align_offset) & info->align_mask;
+        gap_end = mas.last;
+        tmp = mas_next(&mas, ULONG_MAX);
+        if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+                if (vm_start_gap(tmp) <= gap_end) {
+                        high_limit = vm_start_gap(tmp);
+                        mas_reset(&mas);
+                        goto retry;
+                }
+        } else {
+                tmp = mas_prev(&mas, 0);
+                if (tmp && vm_end_gap(tmp) > gap) {
+                        high_limit = tmp->vm_start;
+                        mas_reset(&mas);
+                        goto retry;
+                }
+        }
+
         return gap;
 }
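Both allocators above now retry the gap search with a tightened limit whenever the gap returned by the maple tree would overlap a neighbouring stack guard region. Stripped of VMAs and the maple tree, the retry skeleton looks roughly like this (names and data invented):

#include <stdio.h>

#define NREG 3

/* Illustrative sketch: candidate gaps are rejected and the floor is raised
 * whenever they collide with a "guarded" range, then the search retries. */
struct region { unsigned long start, end; };

static long find_gap(unsigned long low, unsigned long high, unsigned long len,
                     const struct region *guard, int nguard)
{
retry:
        if (low + len > high)
                return -1;                      /* -ENOMEM in the kernel */

        unsigned long gap = low;                /* lowest fitting address */

        for (int i = 0; i < nguard; i++) {
                /* collision with a guarded range: raise the floor and retry */
                if (gap < guard[i].end && gap + len > guard[i].start) {
                        low = guard[i].end;
                        goto retry;
                }
        }
        return (long)gap;
}

int main(void)
{
        struct region guard[NREG] = { { 0, 100 }, { 150, 200 }, { 180, 260 } };

        printf("%ld\n", find_gap(0, 1000, 80, guard, NREG));    /* prints 260 */
        return 0;
}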
mm/page_alloc.c

@@ -5796,7 +5796,21 @@ static void __build_all_zonelists(void *data)
         int nid;
         int __maybe_unused cpu;
         pg_data_t *self = data;
+        unsigned long flags;
 
+        /*
+         * Explicitly disable this CPU's interrupts before taking seqlock
+         * to prevent any IRQ handler from calling into the page allocator
+         * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+         */
+        local_irq_save(flags);
+        /*
+         * Explicitly disable this CPU's synchronous printk() before taking
+         * seqlock to prevent any printk() from trying to hold port->lock, for
+         * tty_insert_flip_string_and_push_buffer() on other CPU might be
+         * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
+         */
+        printk_deferred_enter();
         write_seqlock(&zonelist_update_seq);
 
 #ifdef CONFIG_NUMA

@@ -5835,6 +5849,8 @@
         }
 
         write_sequnlock(&zonelist_update_seq);
+        printk_deferred_exit();
+        local_irq_restore(flags);
 }
 
 static noinline void __init

@@ -6884,6 +6900,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
                 if (PageReserved(page))
                         return false;
 
+                if (PageHuge(page))
+                        return false;
         }
         return true;
 }
mm/vmalloc.c

@@ -313,7 +313,7 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
                                  ioremap_max_page_shift);
         flush_cache_vmap(addr, end);
         if (!err)
-                kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-                                         ioremap_max_page_shift);
+                err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+                                               ioremap_max_page_shift);
         return err;
 }

@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                 pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-        kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+        int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+                                                 page_shift);
+
+        if (ret)
+                return ret;
         return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
tools/Makefile

@@ -39,7 +39,7 @@ help:
         @echo '  turbostat              - Intel CPU idle stats and freq reporting tool'
         @echo '  usb                    - USB testing tools'
         @echo '  virtio                 - vhost test module'
-        @echo '  vm                     - misc vm tools'
+        @echo '  mm                     - misc mm tools'
         @echo '  wmi                    - WMI interface examples'
         @echo '  x86_energy_perf_policy - Intel energy policy tool'
         @echo ''

@@ -69,7 +69,7 @@ acpi: FORCE
 cpupower: FORCE
         $(call descend,power/$@)
 
-cgroup counter firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
+cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
         $(call descend,$@)
 
 bpf/%: FORCE

@@ -118,7 +118,7 @@ kvm_stat: FORCE
 all: acpi cgroup counter cpupower gpio hv firewire \
                 perf selftests bootconfig spi turbostat usb \
-                virtio vm bpf x86_energy_perf_policy \
+                virtio mm bpf x86_energy_perf_policy \
                 tmon freefall iio objtool kvm_stat wmi \
                 pci debugging tracing thermal thermometer thermal-engine

@@ -128,7 +128,7 @@ acpi_install:
 cpupower_install:
         $(call descend,power/$(@:_install=),install)
 
-cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
+cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
         $(call descend,$(@:_install=),install)
 
 selftests_install:

@@ -158,7 +158,7 @@ kvm_stat_install:
 install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
                 hv_install firewire_install iio_install \
                 perf_install selftests_install turbostat_install usb_install \
-                virtio_install vm_install bpf_install x86_energy_perf_policy_install \
+                virtio_install mm_install bpf_install x86_energy_perf_policy_install \
                 tmon_install freefall_install objtool_install kvm_stat_install \
                 wmi_install pci_install debugging_install intel-speed-select_install \
                 tracing_install thermometer_install thermal-engine_install

@@ -169,7 +169,7 @@ acpi_clean:
 cpupower_clean:
         $(call descend,power/cpupower,clean)
 
-cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
+cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
         $(call descend,$(@:_clean=),clean)
 
 libapi_clean:

@@ -211,7 +211,7 @@ build_clean:
 clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
                 perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
-                vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
+                mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
                 freefall_clean build_clean libbpf_clean libsubcmd_clean \
                 gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
                 intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean