Commit 5470e17c authored by Andrew Morton, committed by Linus Torvalds

[PATCH] idr: remove counter bits from id's

idr_get_new() currently returns an incrementing counter in the top 8 bits of
the id.  This means that most users have to mask it off again, and we only
have a 24-bit id range.
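
For illustration, a minimal sketch of the masking this forces on callers
(my_idr, ptr and slot are made-up names; the three-argument idr_get_new()
form documented in lib/idr.c is assumed):

	int id, err, slot;

	err = idr_get_new(&my_idr, ptr, &id);	/* id = (counter << 24) | slot */
	if (!err)
		slot = id & MAX_ID_MASK;	/* every caller must mask the counter off */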

So remove that counter.  Also:

- Remove the BITS_PER_INT define due to namespace collision risk.

- Make MAX_ID_SHIFT 31, so ids have a 0 to 2G-1 range (see the worked
  arithmetic after this list).

- Why is MAX_ID_SHIFT using sizeof(int) and not sizeof(long)?  If it's for
  consistency across 32- and 64-bit machines, why not just make it "31"?

- Does this still hold true with the counter removed?

    /* We can only use half the bits in the top level because there are
       only four possible bits in the top level (5 bits * 4 levels = 25
       bits, but you only use 24 bits in the id). */

  If not, what needs to change?
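
For reference, a sketch of the range and level arithmetic behind these
points, derived from the macros in the patch below (the level count assumes
the usual MAX_LEVEL = (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS definition):

	/* Before: MAX_ID_SHIFT = BITS_PER_INT - RESERVED_ID_BITS = 32 - 8 = 24,
	 *         so valid ids span 0 ... (1 << 24) - 1, a 16M range.
	 * After:  MAX_ID_SHIFT = sizeof(int)*8 - 1 = 31,
	 *         so valid ids span 0 ... (1 << 31) - 1 = 0x7fffffff (2G-1).
	 *
	 * Levels: with IDR_BITS = 5, seven 5-bit levels cover 35 bits, so a
	 * 31-bit id space would leave only one significant bit for the top
	 * level, not four -- the quoted comment's arithmetic would need
	 * revisiting.
	 */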
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 90e518e1
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -11,8 +11,6 @@
 #include <linux/types.h>
 #include <asm/bitops.h>
 
-#define RESERVED_ID_BITS 8
-
 #if BITS_PER_LONG == 32
 # define IDR_BITS 5
 # define IDR_FULL 0xfffffffful
@@ -32,11 +30,8 @@
 #define IDR_SIZE (1 << IDR_BITS)
 #define IDR_MASK ((1 << IDR_BITS)-1)
 
-/* Define the size of the id's */
-#define BITS_PER_INT (sizeof(int)*8)
-
-#define MAX_ID_SHIFT (BITS_PER_INT - RESERVED_ID_BITS)
-#define MAX_ID_BIT (1 << MAX_ID_SHIFT)
+#define MAX_ID_SHIFT (sizeof(int)*8 - 1)
+#define MAX_ID_BIT (1U << MAX_ID_SHIFT)
 #define MAX_ID_MASK (MAX_ID_BIT - 1)
 
 /* Leave the possibility of an incomplete final layer */
@@ -46,25 +41,23 @@
 #define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL
 
 struct idr_layer {
 	unsigned long	 bitmap; /* A zero bit means "space here" */
 	struct idr_layer *ary[1<<IDR_BITS];
 	int		 count;	 /* When zero, we can release it */
 };
 
 struct idr {
 	struct idr_layer *top;
 	struct idr_layer *id_free;
-	long		  count;
 	int		  layers;
 	int		  id_free_cnt;
 	spinlock_t	  lock;
 };
 
 #define IDR_INIT(name)	\
 {			\
 	.top		= NULL,	\
 	.id_free	= NULL,	\
-	.count		= 0,	\
 	.layers		= 0,	\
 	.id_free_cnt	= 0,	\
 	.lock		= SPIN_LOCK_UNLOCKED,	\
@@ -82,4 +75,3 @@ void idr_remove(struct idr *idp, int id);
 void idr_init(struct idr *idp);
 
 extern kmem_cache_t *idr_layer_cache;
-
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -27,15 +27,6 @@
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
  *
- * A word on reuse. We reuse empty id slots as soon as we can, always
- * using the lowest one available. But we also merge a counter in the
- * high bits of the id. The counter is RESERVED_ID_BITS (8 at this time)
- * long. This means that if you allocate and release the same id in a
- * loop we will reuse an id after about 256 times around the loop. The
- * word about is used here as we will NOT return a valid id of -1 so if
- * you loop on the largest possible id (and that is 24 bits, wow!) we
- * will kick the counter to avoid -1. (Paranoid? You bet!)
- *
  * What you need to do is, since we don't keep the counter as part of
  * id / ptr pair, to keep a copy of it in the pointed to structure
  * (or else where) so that when you ask for a ptr you can varify that
@@ -78,7 +69,8 @@
  * If memory is required, it will return -EAGAIN, you should unlock
  * and go back to the idr_pre_get() call. If the idr is full, it
  * will return a -ENOSPC. ptr is the pointer you want associated
- * with the id. The value is returned in the "id" field.
+ * with the id. The value is returned in the "id" field. idr_get_new()
+ * returns a value in the range 0 ... 0x7fffffff
  *
  * void *idr_find(struct idr *idp, int id);
@@ -271,12 +263,6 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id)
 	v = sub_alloc(idp, ptr, &id);
 	if (v == -2)
 		goto build_up;
-	if ( likely(v >= 0 )) {
-		idp->count++;
-		v += (idp->count << MAX_ID_SHIFT);
-		if ( unlikely( v == -1 ))
-			 v += (1L << MAX_ID_SHIFT);
-	}
 	return(v);
 }
 EXPORT_SYMBOL(idr_get_new_above);
@@ -334,6 +320,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 		idp->layers = 0;
 	}
 }
+
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
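
For illustration, a hedged sketch of the calling convention the updated
lib/idr.c comment describes (my_idr, my_lock and my_object are made-up names,
and the gfp_mask argument to idr_pre_get() is assumed; older trees took only
the idr pointer):

	int id, err;

retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;		/* really out of memory */
	spin_lock(&my_lock);
	err = idr_get_new(&my_idr, my_object, &id);
	spin_unlock(&my_lock);
	if (err == -EAGAIN)
		goto retry;		/* preallocated layers were used up; preallocate again */
	if (err)
		return err;		/* -ENOSPC: the idr is full */
	/* id is in 0 ... 0x7fffffff -- no counter bits to mask off */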