Commit c53badd0
authored Mar 20, 2011 by Pekka Enberg
Merge branch 'slab/next' into for-linus
parents 521cb40b 865d794d

Showing 5 changed files with 107 additions and 88 deletions (+107 -88)
include/linux/slab.h      +0  -1
include/linux/slub_def.h  +1  -0
mm/slab.c                 +25 -30
mm/slob.c                 +0  -6
mm/slub.c                 +81 -51
include/linux/slab.h

@@ -105,7 +105,6 @@ void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
-const char *kmem_cache_name(struct kmem_cache *);

 /*
  * Please use this macro to create slab caches. Simply specify the
include/linux/slub_def.h

@@ -83,6 +83,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	int reserved;		/* Reserved bytes at the end of slabs */
 	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
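The new reserved field records how many bytes SLUB sets aside at the very end of each slab page, bytes that are then excluded from object packing. A minimal user-space sketch of that arithmetic (PAGE_SIZE, the 256-byte object size and the 16-byte reservation are illustrative values, not taken from the commit):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages, just for the example */

	int main(void)
	{
		int order = 0, reserved = 16;	/* e.g. sizeof(struct rcu_head) on 64-bit */
		unsigned long size = 256;	/* object size */
		unsigned long slab_bytes = PAGE_SIZE << order;
		unsigned long objects = (slab_bytes - reserved) / size;

		printf("slab bytes       : %lu\n", slab_bytes);
		printf("usable bytes     : %lu\n", slab_bytes - reserved);
		printf("objects per slab : %lu\n", objects);
		printf("reserved region  : [%lu, %lu)\n", slab_bytes - reserved, slab_bytes);
		return 0;
	}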
mm/slab.c

@@ -190,22 +190,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

-/*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
- */
-struct slab {
-	struct list_head list;
-	unsigned long colouroff;
-	void *s_mem;		/* including colour offset */
-	unsigned int inuse;	/* num of objs active in slab */
-	kmem_bufctl_t free;
-	unsigned short nodeid;
-};
-
 /*
  * struct slab_rcu
  *
@@ -219,8 +203,6 @@ struct slab {
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
- *
- * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
 	struct rcu_head head;
@@ -228,6 +210,27 @@ struct slab_rcu {
 	void *addr;
 };

+/*
+ * struct slab
+ *
+ * Manages the objs in a slab. Placed either at the beginning of mem allocated
+ * for a slab, or allocated from an general cache.
+ * Slabs are chained into three list: fully used, partial, fully free slabs.
+ */
+struct slab {
+	union {
+		struct {
+			struct list_head list;
+			unsigned long colouroff;
+			void *s_mem;		/* including colour offset */
+			unsigned int inuse;	/* num of objs active in slab */
+			kmem_bufctl_t free;
+			unsigned short nodeid;
+		};
+		struct slab_rcu __slab_cover_slab_rcu;
+	};
+};
+
 /*
  * struct array_cache
  *
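Instead of merely assuming that struct slab_rcu can overlay struct slab when a cache is destroyed via RCU (the comment dropped above), the union makes the overlay explicit and lets the compiler size the descriptor to cover both views. A rough stand-alone sketch of the idea, using simplified stand-in types rather than the real kernel definitions:

	#include <stdio.h>

	/* Simplified stand-ins; the real definitions live in the kernel. */
	struct list_head { struct list_head *next, *prev; };
	struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };
	typedef unsigned int kmem_bufctl_t;

	struct slab_rcu {
		struct rcu_head head;
		void *cachep;			/* really a struct kmem_cache * */
		void *addr;
	};

	struct slab {
		union {
			struct {
				struct list_head list;
				unsigned long colouroff;
				void *s_mem;
				unsigned int inuse;
				kmem_bufctl_t free;
				unsigned short nodeid;
			};
			struct slab_rcu __slab_cover_slab_rcu;
		};
	};

	int main(void)
	{
		/* The union guarantees the descriptor is large enough for either view. */
		printf("sizeof(struct slab)     = %zu\n", sizeof(struct slab));
		printf("sizeof(struct slab_rcu) = %zu\n", sizeof(struct slab_rcu));
		return 0;
	}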
@@ -2147,8 +2150,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
- * Note that kmem_cache_name() is not guaranteed to return the same pointer,
- * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2288,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (ralign < align) {
 		ralign = align;
 	}
-	/* disable debug if not aligning with REDZONE_ALIGN */
-	if (ralign & (__alignof__(unsigned long long) - 1))
+	/* disable debug if necessary */
+	if (ralign > __alignof__(unsigned long long))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
@@ -2315,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (flags & SLAB_RED_ZONE) {
 		/* add space for red zone words */
-		cachep->obj_offset += align;
-		size += align + sizeof(unsigned long long);
+		cachep->obj_offset += sizeof(unsigned long long);
+		size += 2 * sizeof(unsigned long long);
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
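With this hunk SLAB goes back to placing one red-zone word on each side of the object: obj_offset advances by a single unsigned long long (the word in front of the object) and the total size grows by two words (one in front, one behind). A worked layout, assuming 8-byte words and an illustrative 24-byte object:

	#include <stdio.h>

	int main(void)
	{
		unsigned long obj_offset = 0, size = 24;	/* illustrative 24-byte object */

		/* mirrors the SLAB_RED_ZONE branch shown above */
		obj_offset += sizeof(unsigned long long);	/* one red-zone word before the object */
		size += 2 * sizeof(unsigned long long);		/* ...and one after it */

		printf("object starts at byte %lu, padded size %lu bytes\n", obj_offset, size);
		/* with 8-byte words: object starts at byte 8, padded size 40 bytes */
		return 0;
	}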
@@ -3840,12 +3841,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(struct kmem_cache *cachep)
-{
-	return cachep->name;
-}
-EXPORT_SYMBOL_GPL(kmem_cache_name);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
mm/slob.c

@@ -666,12 +666,6 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(struct kmem_cache *c)
-{
-	return c->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
mm/slub.c

@@ -281,11 +281,40 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }

+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->objsize;
+
+#endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+	return ((PAGE_SIZE << order) - reserved) / size;
+}
+
 static inline struct kmem_cache_order_objects oo_make(int order,
-						unsigned long size)
+						unsigned long size, int reserved)
 {
 	struct kmem_cache_order_objects x = {
-		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + order_objects(order, size, reserved)
 	};

 	return x;
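oo_make() still packs the page order and the per-slab object count into one word; only the count now comes from order_objects(), which subtracts the reserved tail before dividing. A user-space sketch of that encoding, assuming 4 KiB pages and OO_SHIFT of 16 as in mm/slub.c of this era (the cache parameters are made-up examples):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages */
	#define OO_SHIFT  16		/* assumed to match mm/slub.c */
	#define OO_MASK   ((1 << OO_SHIFT) - 1)

	static int order_objects(int order, unsigned long size, int reserved)
	{
		return ((PAGE_SIZE << order) - reserved) / size;
	}

	static unsigned long oo_make(int order, unsigned long size, int reserved)
	{
		return (order << OO_SHIFT) + order_objects(order, size, reserved);
	}

	int main(void)
	{
		unsigned long x = oo_make(1, 192, 16);	/* order-1 slab, 192-byte objects, 16 reserved */

		printf("order   = %lu\n", x >> OO_SHIFT);	/* 1 */
		printf("objects = %lu\n", x & OO_MASK);		/* (8192 - 16) / 192 = 42 */
		return 0;
	}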
@@ -617,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;

 	start = page_address(page);
-	length = (PAGE_SIZE << compound_order(page));
+	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -698,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		return 0;
 	}

-	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 	if (page->objects > maxobj) {
 		slab_err(s, page, "objects %u > max %u",
 			s->name, page->objects, maxobj);
@@ -748,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		nr++;
 	}

-	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+	max_objects = order_objects(compound_order(page), s->size, s->reserved);
 	if (max_objects > MAX_OBJS_PER_PAGE)
 		max_objects = MAX_OBJS_PER_PAGE;
@@ -800,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
@@ -1249,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }

+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;

-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }

 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}

 		call_rcu(head, rcu_free_slab);
 	} else
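need_reserve_slab_rcu only triggers a reservation when struct rcu_head no longer fits over page->lru (a struct list_head, i.e. two pointers), which is where the RCU head used to be overlaid. In that case the head is placed in the reserved bytes at the end of the slab instead, and virt_to_head_page() recovers the page from that address on the free path. A rough user-space illustration of the size comparison, with simplified stand-in types (the "grown" variant is hypothetical):

	#include <stdio.h>

	/* Simplified stand-ins for the kernel types involved. */
	struct list_head { struct list_head *next, *prev; };

	/* The classic two-word rcu_head fits over page->lru ... */
	struct rcu_head_classic {
		struct rcu_head_classic *next;
		void (*func)(struct rcu_head_classic *);
	};

	/* ... a hypothetical grown one (extra debug field) would not. */
	struct rcu_head_grown {
		struct rcu_head_grown *next;
		void (*func)(struct rcu_head_grown *);
		unsigned long debug_cookie;	/* made-up field for illustration */
	};

	#define need_reserve(rcu_type) (sizeof(struct list_head) < sizeof(rcu_type))

	int main(void)
	{
		printf("classic rcu_head: reserve tail space? %s\n",
		       need_reserve(struct rcu_head_classic) ? "yes" : "no");	/* no */
		printf("grown rcu_head  : reserve tail space? %s\n",
		       need_reserve(struct rcu_head_grown) ? "yes" : "no");	/* yes */
		return 0;
	}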
@@ -1988,13 +2034,13 @@ static int slub_nomerge;
  * the smallest order which will fit the object.
  */
 static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover)
+				int max_order, int fract_leftover, int reserved)
 {
 	int order;
 	int rem;
 	int min_order = slub_min_order;

-	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

 	for (order = max(min_order,
@@ -2003,10 +2049,10 @@ static inline int slab_order(int size, int min_objects,
 		unsigned long slab_size = PAGE_SIZE << order;

-		if (slab_size < min_objects * size)
+		if (slab_size < min_objects * size + reserved)
 			continue;

-		rem = slab_size % size;
+		rem = (slab_size - reserved) % size;

 		if (rem <= slab_size / fract_leftover)
 			break;
@@ -2016,7 +2062,7 @@ static inline int slab_order(int size, int min_objects,
 	return order;
 }

-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
 {
 	int order;
 	int min_objects;
@@ -2034,14 +2080,14 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
-	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	max_objects = order_objects(slub_max_order, size, reserved);
 	min_objects = min(min_objects, max_objects);

 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
-						slub_max_order, fraction);
+					slub_max_order, fraction, reserved);
 			if (order <= slub_max_order)
 				return order;
 			fraction /= 2;
@@ -2053,14 +2099,14 @@ static inline int calculate_order(int size)
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
 	 */
-	order = slab_order(size, 1, slub_max_order, 1);
+	order = slab_order(size, 1, slub_max_order, 1, reserved);
 	if (order <= slub_max_order)
 		return order;

 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = slab_order(size, 1, MAX_ORDER, 1);
+	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
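calculate_order() keeps its fraction-of-leftover search; the reserved tail simply shrinks the space objects may occupy, both in the minimum-fit test and in the remainder check. A stand-alone sketch of the per-order test from slab_order(), with illustrative numbers (not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages */

	/* Reproduces the per-order test in slab_order() above (illustration only). */
	static int order_fits(int order, int min_objects, unsigned long size,
			      int fract_leftover, int reserved)
	{
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem;

		if (slab_size < min_objects * size + reserved)
			return 0;			/* cannot hold enough objects */

		rem = (slab_size - reserved) % size;	/* waste left after packing objects */
		return rem <= slab_size / fract_leftover;
	}

	int main(void)
	{
		/* 512-byte objects, want >= 8 per slab, allow 1/16 of the slab as waste */
		printf("order 0, no reserve : %d\n", order_fits(0, 8, 512, 16, 0));	/* 1 */
		printf("order 0, 16 reserved: %d\n", order_fits(0, 8, 512, 16, 16));	/* 0 */
		return 0;
	}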
@@ -2311,7 +2357,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-		order = calculate_order(size);
+		order = calculate_order(size, s->reserved);

 	if (order < 0)
 		return 0;
@@ -2329,8 +2375,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->oo = oo_make(order, size);
-	s->min = oo_make(get_order(size), size);
+	s->oo = oo_make(order, size, s->reserved);
+	s->min = oo_make(get_order(size), size, s->reserved);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
@@ -2349,6 +2395,10 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->objsize = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->reserved = 0;
+
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);

 	if (!calculate_sizes(s, -1))
 		goto error;
@@ -2399,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(struct kmem_cache *s)
-{
-	return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
 							const char *text)
 {
@@ -2696,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
 size_t ksize(const void *object)
 {
 	struct page *page;
-	struct kmem_cache *s;

 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
@@ -2707,28 +2750,8 @@ size_t ksize(const void *object)
 		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
 	}
-	s = page->slab;

-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
-
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
+	return slab_ksize(page->slab);
 }
 EXPORT_SYMBOL(ksize);
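ksize() now shares slab_ksize() with the allocation hooks, so both report the same usable size for an object: debugging (red zone or poison) limits callers to objsize, RCU destruction or user tracking limits them to inuse, and otherwise the full size including padding is usable. A user-space sketch of that decision ladder; the flag bit values and the struct are simplified stand-ins, not the SLUB definitions:

	#include <stdio.h>

	/* Stand-in flag bits and cache descriptor, for illustration only. */
	#define SLAB_RED_ZONE        0x1
	#define SLAB_POISON          0x2
	#define SLAB_DESTROY_BY_RCU  0x4
	#define SLAB_STORE_USER      0x8

	struct cache { unsigned long flags; unsigned long objsize, inuse, size; };

	/* Mirrors the decision ladder of slab_ksize() shown above. */
	static unsigned long cache_ksize(const struct cache *s)
	{
		if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
			return s->objsize;	/* padding is owned by the debug code */
		if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
			return s->inuse;	/* metadata sits right after the object */
		return s->size;			/* all padding is usable by the caller */
	}

	int main(void)
	{
		struct cache c = { .flags = 0, .objsize = 100, .inuse = 104, .size = 112 };

		printf("plain cache     : %lu usable bytes\n", cache_ksize(&c));	/* 112 */
		c.flags = SLAB_STORE_USER;
		printf("with user track : %lu usable bytes\n", cache_ksize(&c));	/* 104 */
		c.flags = SLAB_RED_ZONE;
		printf("with red zoning : %lu usable bytes\n", cache_ksize(&c));	/* 100 */
		return 0;
	}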
@@ -4017,6 +4040,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(destroy_by_rcu);

+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
 #ifdef CONFIG_SLUB_DEBUG
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
@@ -4303,6 +4332,7 @@ static struct attribute *slab_attrs[] = {
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
 	&shrink_attr.attr,
+	&reserved_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
 	&total_objects_attr.attr,
 	&slabs_attr.attr,