Commit 106a74e1 authored by Joonsoo Kim, committed by Pekka Enberg

slab: replace free and inuse in struct slab with newly introduced active

Now, free in struct slab has the same meaning as inuse: since the freelist is kept as a stack of object indices, the index of the first free entry (free) always equals the number of objects in use (inuse).
So, remove both and replace them with a single field, active.
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
parent 45eed508
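Background, not part of the commit: as the diff below shows, the per-slab object index array behaves like a stack. Entries slab_bufctl(slabp)[active..num-1] hold the indices of the free objects, so active is simultaneously the count of objects in use and the stack top; a single counter can therefore replace both inuse and free. A minimal standalone sketch of the scheme follows (plain C; toy_slab, toy_get_obj, toy_put_obj, and NUM_OBJS are illustrative names, not kernel code):

#include <assert.h>
#include <stdio.h>

#define NUM_OBJS 4                              /* objects per slab; made-up size */

/* Toy model of the patched struct slab: one counter plus the index array. */
struct toy_slab {
        unsigned int active;                    /* objs in use; also the stack top */
        unsigned int bufctl[NUM_OBJS];          /* [active..NUM_OBJS-1] = free indices */
};

/* Mirrors slab_get_obj(): read the index at the stack top, then bump active. */
static unsigned int toy_get_obj(struct toy_slab *s)
{
        assert(s->active < NUM_OBJS);           /* cf. BUG_ON(slabp->active >= cachep->num) */
        return s->bufctl[s->active++];
}

/* Mirrors slab_put_obj(): drop active, then store the freed index at the top. */
static void toy_put_obj(struct toy_slab *s, unsigned int objnr)
{
        unsigned int i;

        /* Double-free check: a free object must not already be in the free region. */
        for (i = s->active; i < NUM_OBJS; i++)
                assert(s->bufctl[i] != objnr);

        s->active--;
        s->bufctl[s->active] = objnr;
}

int main(void)
{
        struct toy_slab s = { .active = 0, .bufctl = { 0, 1, 2, 3 } };
        unsigned int a, b;

        a = toy_get_obj(&s);                    /* pops index 0, active becomes 1 */
        b = toy_get_obj(&s);                    /* pops index 1, active becomes 2 */
        printf("allocated %u and %u, active=%u\n", a, b, s.active);

        toy_put_obj(&s, a);                     /* pushes 0 back, active becomes 1 */
        printf("freed %u, active=%u, next alloc=%u\n", a, s.active, s.bufctl[s.active]);
        return 0;
}

Note how the patched slab_get_obj() reads the index at position active before incrementing (a pop), while slab_put_obj() decrements first and then stores (a push); the double-free check in the kernel code scans the free region [active..num-1] the same way the assert loop does here.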
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -174,8 +174,7 @@ struct slab {
 	struct {
 		struct list_head list;
 		void *s_mem;		/* including colour offset */
-		unsigned int inuse;	/* num of objs active in slab */
-		unsigned int free;
+		unsigned int active;	/* num of objs active in slab */
 	};
 };
 
@@ -1658,7 +1657,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 			active_slabs++;
 		}
 		list_for_each_entry(slabp, &n->slabs_partial, list) {
-			active_objs += slabp->inuse;
+			active_objs += slabp->active;
 			active_slabs++;
 		}
 		list_for_each_entry(slabp, &n->slabs_free, list)
@@ -2451,7 +2450,7 @@ static int drain_freelist(struct kmem_cache *cache,
 		slabp = list_entry(p, struct slab, list);
 #if DEBUG
-		BUG_ON(slabp->inuse);
+		BUG_ON(slabp->active);
 #endif
 		list_del(&slabp->list);
 
 		/*
@@ -2570,9 +2569,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
 		slabp = addr + colour_off;
 		colour_off += cachep->slab_size;
 	}
-	slabp->inuse = 0;
+	slabp->active = 0;
 	slabp->s_mem = addr + colour_off;
-	slabp->free = 0;
 
 	return slabp;
 }
@@ -2642,12 +2640,11 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
 {
 	void *objp;
 
-	slabp->inuse++;
-	objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->free]);
+	objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->active]);
+	slabp->active++;
 #if DEBUG
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
 #endif
-	slabp->free++;
 
 	return objp;
 }
@@ -2663,7 +2660,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
 
 	/* Verify double free bug */
-	for (i = slabp->free; i < cachep->num; i++) {
+	for (i = slabp->active; i < cachep->num; i++) {
 		if (slab_bufctl(slabp)[i] == objnr) {
 			printk(KERN_ERR "slab: double free detected in cache "
 					"'%s', objp %p\n", cachep->name, objp);
@@ -2671,9 +2668,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 		}
 	}
 #endif
-	slabp->free--;
-	slab_bufctl(slabp)[slabp->free] = objnr;
-	slabp->inuse--;
+	slabp->active--;
+	slab_bufctl(slabp)[slabp->active] = objnr;
 }
 
 /*
@@ -2908,9 +2904,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse >= cachep->num);
+		BUG_ON(slabp->active >= cachep->num);
 
-		while (slabp->inuse < cachep->num && batchcount--) {
+		while (slabp->active < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
 			STATS_INC_ACTIVE(cachep);
 			STATS_SET_HIGH(cachep);
@@ -2921,7 +2917,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 
 		/* move slabp to correct slabp list: */
 		list_del(&slabp->list);
-		if (slabp->free == cachep->num)
+		if (slabp->active == cachep->num)
 			list_add(&slabp->list, &n->slabs_full);
 		else
 			list_add(&slabp->list, &n->slabs_partial);
@@ -3206,14 +3202,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	STATS_INC_ACTIVE(cachep);
 	STATS_SET_HIGH(cachep);
 
-	BUG_ON(slabp->inuse == cachep->num);
+	BUG_ON(slabp->active == cachep->num);
 
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	n->free_objects--;
 	/* move slabp to correct slabp list: */
 	list_del(&slabp->list);
 
-	if (slabp->free == cachep->num)
+	if (slabp->active == cachep->num)
 		list_add(&slabp->list, &n->slabs_full);
 	else
 		list_add(&slabp->list, &n->slabs_partial);
@@ -3380,7 +3376,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		n->free_objects++;
 
 		/* fixup slab chains */
-		if (slabp->inuse == 0) {
+		if (slabp->active == 0) {
 			if (n->free_objects > n->free_limit) {
 				n->free_objects -= cachep->num;
 				/* No need to drop any previously held
@@ -3441,7 +3437,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 			struct slab *slabp;
 
 			slabp = list_entry(p, struct slab, list);
-			BUG_ON(slabp->inuse);
+			BUG_ON(slabp->active);
 
 			i++;
 			p = p->next;
@@ -4066,22 +4062,22 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 		spin_lock_irq(&n->list_lock);
 
 		list_for_each_entry(slabp, &n->slabs_full, list) {
-			if (slabp->inuse != cachep->num && !error)
+			if (slabp->active != cachep->num && !error)
 				error = "slabs_full accounting error";
 			active_objs += cachep->num;
 			active_slabs++;
 		}
 		list_for_each_entry(slabp, &n->slabs_partial, list) {
-			if (slabp->inuse == cachep->num && !error)
-				error = "slabs_partial inuse accounting error";
-			if (!slabp->inuse && !error)
-				error = "slabs_partial/inuse accounting error";
-			active_objs += slabp->inuse;
+			if (slabp->active == cachep->num && !error)
+				error = "slabs_partial accounting error";
+			if (!slabp->active && !error)
+				error = "slabs_partial accounting error";
+			active_objs += slabp->active;
 			active_slabs++;
 		}
 		list_for_each_entry(slabp, &n->slabs_free, list) {
-			if (slabp->inuse && !error)
-				error = "slabs_free/inuse accounting error";
+			if (slabp->active && !error)
+				error = "slabs_free accounting error";
 			num_slabs++;
 		}
 		free_objects += n->free_objects;
@@ -4243,7 +4239,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
 		bool active = true;
 
-		for (j = s->free; j < c->num; j++) {
+		for (j = s->active; j < c->num; j++) {
 			/* Skip freed item */
 			if (slab_bufctl(s)[j] == i) {
 				active = false;