Commit 415cb479 authored Aug 04, 2010 by Pekka Enberg

Merge branches 'slab/fixes', 'slob/fixes', 'slub/cleanups' and 'slub/fixes' into for-linus

Parents: 9fe6206f 78b43536 d602daba 2bce6485 bc6488e9

Changes: 5 changed files with 56 additions and 49 deletions (+56 -49)
include/linux/page-flags.h   +0  -2
include/linux/slab.h         +4  -2
mm/slab.c                    +1  -1
mm/slob.c                    +8  -1
mm/slub.c                   +43 -43
include/linux/page-flags.h

@@ -128,7 +128,6 @@ enum pageflags {
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };

 #ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)

 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)

 /*
  * Private page markings that may be used by the filesystem that owns the page
include/linux/slab.h

@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
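The track-caller machinery above lets the kmalloc wrappers attribute an allocation to the real call site passed in via _RET_IP_, rather than to the wrapper itself; the hunks merely extend the #if so the declarations are also available when CONFIG_SLAB is combined with CONFIG_TRACING. A minimal userspace sketch of the same pattern, with illustrative names (my_alloc and my_alloc_track_caller are not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for __kmalloc_track_caller(); a real allocator
 * would record 'caller' for leak reports and tracing. */
static void *my_alloc_track_caller(size_t size, void *caller)
{
	printf("%zu bytes requested from call site %p\n", size, caller);
	return malloc(size);
}

/* The macro captures the caller's return address, so the report points
 * at the user of my_alloc(), not at the wrapper function. */
#define my_alloc(size) \
	my_alloc_track_caller((size), __builtin_return_address(0))

int main(void)
{
	void *p = my_alloc(128);

	free(p);
	return 0;
}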
mm/slab.c

@@ -861,7 +861,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
mm/slob.c

@@ -394,6 +394,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;

 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -422,7 +423,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			 (void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
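This completes SLOB's size-classed free lists: slob_free() now returns a partially free page to the list matching the original request size, the same three-way split slob_alloc() uses, instead of unconditionally putting it on free_slob_small. A standalone sketch of the dispatch, with illustrative break points standing in for SLOB_BREAK1 and SLOB_BREAK2:

#include <stdio.h>

/* Illustrative boundaries; mm/slob.c defines the real SLOB_BREAK1 and
 * SLOB_BREAK2 values. */
#define BREAK1 256
#define BREAK2 1024

/* Mirror of the three-way dispatch slob_free() now shares with slob_alloc(). */
static const char *pick_list(int size)
{
	if (size < BREAK1)
		return "free_slob_small";
	else if (size < BREAK2)
		return "free_slob_medium";
	else
		return "free_slob_large";
}

int main(void)
{
	int sizes[] = { 64, 512, 4096 };

	for (int i = 0; i < 3; i++)
		printf("size %4d -> %s\n", sizes[i], pick_list(sizes[i]));
	return 0;
}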
mm/slub.c

@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */

+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}

 /*
  * Issues still to be resolved:
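This hunk replaces the build-time SLABDEBUG 0/1 constant with a per-cache predicate: debug slow paths are taken only when the cache in question actually has a debug flag set, and later hunks convert every SLABDEBUG && PageSlubDebug(page) test to kmem_cache_debug(s). A compilable standalone model of the idea (the struct and flag set are simplified stand-ins for the kernel definitions):

#include <stdio.h>

/* Simplified stand-ins; the real flag values live in <linux/slab.h>. */
#define SLAB_RED_ZONE	0x00000400UL
#define SLAB_POISON	0x00000800UL
#define SLAB_STORE_USER	0x00010000UL
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

struct kmem_cache {
	unsigned long flags;
};

/* Runtime, per-cache test replacing the old global SLABDEBUG 0/1:
 * caches without debug flags skip the slow paths entirely. */
static inline int kmem_cache_debug(struct kmem_cache *s)
{
	return (s->flags & SLAB_DEBUG_FLAGS) != 0;
}

int main(void)
{
	struct kmem_cache plain = { .flags = 0 };
	struct kmem_cache poisoned = { .flags = SLAB_POISON };

	printf("plain: %d, poisoned: %d\n",
	       kmem_cache_debug(&plain), kmem_cache_debug(&poisoned));
	return 0;
}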
@@ -162,8 +168,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */

 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */

 static int kmem_size = sizeof(struct kmem_cache);
@@ -1073,7 +1079,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 		flags |= __GFP_NOTRACK;

-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
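Here, and in several hunks below, the bare -1 node id is replaced by the named constant NUMA_NO_NODE (still defined as -1), so "no node preference" is spelled out at each call site. A trivial sketch of the sentinel in use:

#include <stdio.h>

/* In the kernel, NUMA_NO_NODE is -1; the named constant documents intent
 * instead of scattering a magic number through the allocator. */
#define NUMA_NO_NODE (-1)

static void alloc_on(int node)
{
	if (node == NUMA_NO_NODE)
		printf("no preference: allocate on the local node\n");
	else
		printf("allocate on node %d\n", node);
}

int main(void)
{
	alloc_on(NUMA_NO_NODE);
	alloc_on(2);
	return 0;
}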
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);

 	start = page_address(page);
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;

-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;

 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}

 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1387,10 +1389,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;

 	return get_any_partial(s, flags);
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(s, DEACTIVATE_FULL);
-			if (SLABDEBUG && PageSlubDebug(page) &&
-						(s->flags & SLAB_STORE_USER))
+			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
@@ -1515,7 +1516,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1624,7 +1625,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;

 	c->freelist = get_freepointer(s, object);
@@ -1727,7 +1728,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
@@ -1738,7 +1739,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);

-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;

 checks_ok:
@@ -2490,7 +2491,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2499,7 +2499,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
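The two kmem_cache_destroy() hunks simplify the locking protocol: rather than releasing slub_lock early and re-acquiring it on the error path, the function now holds the lock across the entire teardown and releases it once at the end. A pthread-based model of the resulting single-exit shape (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slub_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 1;

/* Take the lock once, do all the bookkeeping, release on a single exit
 * path, instead of the old drop/re-take dance around the slow parts. */
static void cache_destroy(void)
{
	pthread_mutex_lock(&slub_lock);
	if (--refcount == 0)
		printf("closing cache with the lock still held\n");
	pthread_mutex_unlock(&slub_lock);	/* one unlock, every path */
}

int main(void)
{
	cache_destroy();
	return 0;
}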
@@ -2728,7 +2728,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);

 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3118,9 +3118,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;

 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i].name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}

 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
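The kmem_cache_init() hunk stops ignoring kasprintf() failures: the result is checked with BUG_ON() before being stored as the cache name, so an early-boot allocation failure cannot silently leave a NULL name behind. A userspace analog using glibc's asprintf():

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Fail loudly on a formatted-allocation failure instead of storing NULL. */
int main(void)
{
	for (int i = 3; i < 6; i++) {
		char *name;

		/* asprintf() returns -1 on allocation failure */
		assert(asprintf(&name, "kmalloc-%d", 1 << i) != -1);
		printf("%s\n", name);
		free(name);
	}
	return 0;
}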
@@ -3223,14 +3226,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);

 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
@@ -3239,14 +3240,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3312,7 +3311,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);

 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3395,16 +3394,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }

 static int validate_slab_node(struct kmem_cache *s,
@@ -4504,6 +4493,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
@@ -4549,8 +4545,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;

+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4575,6 +4574,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}

+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }