nexedi / linux

Commit 0cc4bd8f, authored Jan 28, 2020 by Ingo Molnar

Merge branch 'core/kprobes' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 56ee04aa 31537cf8
Changes: 2 changed files with 45 additions and 25 deletions (+45 -25)

    kernel/kprobes.c                +43 -24
    kernel/trace/trace_syscalls.c    +2  -1
kernel/kprobes.c

@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
         mutex_unlock(&kprobe_mutex);
 }

+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+        struct optimized_kprobe *_op;
+
+        list_for_each_entry(_op, &unoptimizing_list, list) {
+                if (op == _op)
+                        return true;
+        }
+
+        return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
                 return;

         /* Check if it is already optimized. */
-        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+                if (optprobe_queued_unopt(op)) {
+                        /* This is under unoptimizing. Just dequeue the probe */
+                        list_del_init(&op->list);
+                }
                 return;
+        }
         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

-        if (!list_empty(&op->list))
-                /* This is under unoptimizing. Just dequeue the probe */
-                list_del_init(&op->list);
-        else {
-                list_add(&op->list, &optimizing_list);
-                kick_kprobe_optimizer();
-        }
+        /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+        if (WARN_ON_ONCE(!list_empty(&op->list)))
+                return;
+
+        list_add(&op->list, &optimizing_list);
+        kick_kprobe_optimizer();
 }

 /* Short cut to direct unoptimizing */
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                 return; /* This is not an optprobe nor optimized */

         op = container_of(p, struct optimized_kprobe, kp);
-        if (!kprobe_optimized(p)) {
-                /* Unoptimized or unoptimizing case */
-                if (force && !list_empty(&op->list)) {
-                        /*
-                         * Only if this is unoptimizing kprobe and forced,
-                         * forcibly unoptimize it. (No need to unoptimize
-                         * unoptimized kprobe again :)
-                         */
-                        list_del_init(&op->list);
-                        force_unoptimize_kprobe(op);
-                }
+        if (!kprobe_optimized(p))
                 return;
-        }

         if (!list_empty(&op->list)) {
-                /* Dequeue from the optimization queue */
-                list_del_init(&op->list);
+                if (optprobe_queued_unopt(op)) {
+                        /* Queued in unoptimizing queue */
+                        if (force) {
+                                /*
+                                 * Forcibly unoptimize the kprobe here, and queue it
+                                 * in the freeing list for release afterwards.
+                                 */
+                                force_unoptimize_kprobe(op);
+                                list_move(&op->list, &freeing_list);
+                        }
+                } else {
+                        /* Dequeue from the optimizing queue */
+                        list_del_init(&op->list);
+                        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+                }
                 return;
         }
+
         /* Optimized kprobe case */
-        if (force)
+        if (force) {
                 /* Forcibly update the code: this is a special case */
                 force_unoptimize_kprobe(op);
-        else {
+        } else {
                 list_add(&op->list, &unoptimizing_list);
                 kick_kprobe_optimizer();
         }
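Note on the kernel/kprobes.c hunks above: the new optprobe_queued_unopt() is a linear membership test on unoptimizing_list. It lets optimize_kprobe() cancel a pending unoptimization by simply dequeuing the probe, and lets unoptimize_kprobe() tell a probe queued for unoptimization (force-unoptimize it and move it to freeing_list when forced) apart from one queued for optimization (dequeue it and clear KPROBE_FLAG_OPTIMIZED). What follows is a rough, self-contained user-space sketch of that cancel-by-dequeue pattern; the list helpers and names are simplified stand-ins, not the kernel's <linux/list.h> API.

/* Sketch only: a tiny intrusive doubly linked list plus a membership
 * test, mimicking the "already queued for unoptimization? then just
 * dequeue it" logic introduced by the patch. Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *prev, *next;
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static bool list_empty(const struct node *head)
{
        return head->next == head;
}

static void list_add(struct node *entry, struct node *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

static void list_del_init(struct node *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        list_init(entry);
}

/* Rough analogue of optprobe_queued_unopt(): is entry on this list? */
static bool queued(const struct node *entry, const struct node *head)
{
        for (const struct node *p = head->next; p != head; p = p->next) {
                if (p == entry)
                        return true;
        }
        return false;
}

int main(void)
{
        struct node unoptimizing, optimizing, op;

        list_init(&unoptimizing);
        list_init(&optimizing);
        list_init(&op);

        list_add(&op, &unoptimizing);   /* probe queued for unoptimization */

        /* Optimization is requested again before the queue is drained:
         * cancel by dequeuing instead of re-adding to the optimizing list. */
        if (queued(&op, &unoptimizing))
                list_del_init(&op);
        else if (list_empty(&op))
                list_add(&op, &optimizing);

        printf("still queued for unopt: %d\n", queued(&op, &unoptimizing)); /* 0 */
        printf("queued for opt: %d\n", queued(&op, &optimizing));           /* 0 */
        return 0;
}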
kernel/trace/trace_syscalls.c

@@ -274,7 +274,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
         struct syscall_trace_enter trace;
         struct syscall_metadata *meta = call->data;
         int offset = offsetof(typeof(trace), args);
-        int ret, i;
+        int ret = 0;
+        int i;

         for (i = 0; i < meta->nb_args; i++) {
                 ret = trace_define_field(call, meta->types[i],
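Note on the kernel/trace/trace_syscalls.c hunk above: with the old declaration "int ret, i;", a syscall whose metadata reports zero arguments skips the for loop entirely, so the function would return an uninitialized ret; initializing it to 0 makes the no-argument case return success. A minimal user-space sketch of the same pattern, where define_fields() and define_field() are made-up stand-ins rather than the kernel's trace_define_field() API:

#include <stdio.h>

/* Made-up stand-in for trace_define_field(): always succeeds here. */
static int define_field(int i)
{
        (void)i;
        return 0;
}

static int define_fields(int nb_args)
{
        int ret = 0;    /* the fix: start at 0 so nb_args == 0 still reports success */
        int i;

        for (i = 0; i < nb_args; i++) {
                ret = define_field(i);
                if (ret)
                        break;
        }
        return ret;
}

int main(void)
{
        printf("nb_args = 0 -> %d\n", define_fields(0)); /* 0, not stack garbage */
        printf("nb_args = 3 -> %d\n", define_fields(3)); /* 0 */
        return 0;
}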