Commit 48b16198 authored by Linus Torvalds

Merge tag 'xarray-5.0-rc3' of git://git.infradead.org/users/willy/linux-dax

Pull XArray fixes from Matthew Wilcox:
 "Fix some oversights in the XArray porcelain API:

   - support for m68k's two-byte aligned pointers

   - reserving entries using xa_insert()

   - missing xa_insert_bh() and xa_insert_irq() functions

   - simplify using xa_for_each()

   - use lockdep correctly

   - a few other minor fixes and improvements"

* tag 'xarray-5.0-rc3' of git://git.infradead.org/users/willy/linux-dax:
  XArray: Fix an arithmetic error in xa_is_err
  XArray tests: Check mark 2 gets squashed
  XArray: Fix typo in comment
  XArray: Honour reserved entries in xa_insert
  XArray: Permit storing 2-byte-aligned pointers
  XArray: Change xa_for_each iterator
  XArray: Turn xa_init_flags into a static inline
  XArray tests: Add RCU locking
parents f8ff6c73 edcddd4c
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
 Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
 will not need to allocate memory. The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index. Users of the normal
-API will see this entry as containing ``NULL``. If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry. If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index. Users of the
+normal API will see this entry as containing ``NULL``. If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry. If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
 
 If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
 will return ``true``.
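
A rough illustration of the reserved-entry rules described above (a minimal sketch, not part of this patch; the array and item names are invented):

static DEFINE_XARRAY(example_array);

static int reserve_example(void *item)
{
	int err = xa_reserve(&example_array, 5, GFP_KERNEL);

	if (err)
		return err;
	/* Normal API users see the reserved slot as NULL ... */
	WARN_ON(xa_load(&example_array, 5) != NULL);
	/* ... but after this series, xa_insert() on it fails with -EEXIST. */
	err = xa_insert(&example_array, 5, item, GFP_KERNEL);
	/* Drop the reservation if it was never used. */
	xa_release(&example_array, 5);
	return err;
}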
@@ -183,6 +184,8 @@ Takes xa_lock internally:
  * :c:func:`xa_store_bh`
  * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
  * :c:func:`xa_erase`
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
......
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
  */
 static inline bool xa_is_err(const void *entry)
 {
-	return unlikely(xa_is_internal(entry));
+	return unlikely(xa_is_internal(entry) &&
+			entry >= xa_mk_internal(-MAX_ERRNO));
 }
 
 /**
@@ -286,7 +287,6 @@ struct xarray {
  */
 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
 
-void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_erase(struct xarray *, unsigned long index);
@@ -303,6 +303,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
 		unsigned long max, unsigned int n, xa_mark_t);
 void xa_destroy(struct xarray *);
 
+/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+	spin_lock_init(&xa->xa_lock);
+	xa->xa_flags = flags;
+	xa->xa_head = NULL;
+}
+
 /**
  * xa_init() - Initialise an empty XArray.
  * @xa: XArray.
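
For an array embedded in another structure, the new static inline can be called at runtime instead of using DEFINE_XARRAY_FLAGS(); a minimal sketch, assuming an invented containing structure that is also updated from interrupt context:

struct foo_dev {
	struct xarray objects;	/* hypothetical per-device index */
};

static void foo_dev_init(struct foo_dev *dev)
{
	/* xa_lock will also be taken from hard interrupt context */
	xa_init_flags(&dev->objects, XA_FLAGS_LOCK_IRQ);
}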
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
+ * @index: Index of @entry.
  * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start)			\
+	for (index = start,						\
+	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
+	     entry;							\
+	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
  * @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
  *
- * Initialise @index to the lowest index you want to retrieve from the
- * array. During the iteration, @entry will have the value of the entry
- * stored in @xa at @index. The iteration will skip all entries in the
- * array which do not match @filter. You may modify @index during the
- * iteration if you want to skip or reprocess indices. It is safe to modify
- * the array during the iteration. At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you want
+ * to skip or reprocess indices. It is safe to modify the array during the
+ * iteration. At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
 
  *
  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
  * to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context. Takes and releases the RCU lock.
  */
-#define xa_for_each(xa, entry, index, max, filter) \
-	for (entry = xa_find(xa, &index, max, filter); entry; \
-	     entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+	xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. The iteration will skip all entries in the array
+ * which do not match @filter. You may modify @index during the iteration
+ * if you want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead. The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
 
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
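
A minimal sketch of how callers look with the reworked iterators (the array pointer and printout are illustrative only): the max/filter arguments are gone, @index no longer needs to be pre-initialised, and offset or marked iteration uses the dedicated macros:

static void dump_entries(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)			/* every present entry */
		pr_info("%lu: %p\n", index, entry);

	xa_for_each_start(xa, index, entry, 10)		/* entries from index 10 up */
		pr_info("%lu: %p\n", index, entry);

	xa_for_each_marked(xa, index, entry, XA_MARK_0)	/* only marked entries */
		pr_info("%lu: %p\n", index, entry);
}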
@@ -393,39 +463,12 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
-/**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- *		already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
-{
-	void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
-}
-
 /**
  * xa_store_bh() - Store this entry in the XArray.
  * @xa: XArray.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 }
 
 /**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
 * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
  *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
  * Return: 0 if the store succeeded. -EEXIST if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
 static inline int xa_insert(struct xarray *xa, unsigned long index,
 		void *entry, gfp_t gfp)
 {
-	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
+	int err;
+
+	xa_lock(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ *		already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ *		already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
 }
 
 /**
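
A hedged sketch of when the new wrappers are useful (the table and IDs here are invented): if the same array is also modified from softirq or hard-irq context, the matching variant keeps the lock irq-safe, just like the existing xa_store_bh()/xa_store_irq() helpers:

/* hypothetical table shared with a timer/softirq path */
static DEFINE_XARRAY_FLAGS(pending_items, XA_FLAGS_LOCK_BH);

static int queue_pending(unsigned long id, void *item)
{
	/* process context: xa_lock_bh() is taken and released internally */
	return xa_insert_bh(&pending_items, id, item, GFP_KERNEL);
}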
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_ZERO_ENTRY		xa_mk_internal(256)
-#define XA_RETRY_ENTRY		xa_mk_internal(257)
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+#define XA_ZERO_ENTRY		xa_mk_internal(257)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -995,6 +1097,17 @@ static inline bool xa_is_retry(const void *entry)
 	return unlikely(entry == XA_RETRY_ENTRY);
 }
 
+/**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
 /**
  * typedef xa_update_node_t - A callback function from the XArray.
  * @node: The node which is being processed
......
@@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
 		xa_set_mark(xa, index + 1, XA_MARK_0);
 		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
-		xa_set_mark(xa, index + 2, XA_MARK_1);
+		xa_set_mark(xa, index + 2, XA_MARK_2);
 		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
 		xa_store_order(xa, index, order, xa_mk_index(index),
 				GFP_KERNEL);
@@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 			void *entry;
 
 			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
-			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
-			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
 
 			/* We should see two elements in the array */
 			rcu_read_lock();
@@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
 static noinline void check_reserve(struct xarray *xa)
 {
 	void *entry;
-	unsigned long index = 0;
+	unsigned long index;
 
 	/* An array with a reserved entry is not empty */
 	XA_BUG_ON(xa, !xa_empty(xa));
@@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa)
 	xa_erase_index(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
-	/* And so does xa_insert */
+	/* But xa_insert does not */
 	xa_reserve(xa, 12345678, GFP_KERNEL);
-	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
-	xa_erase_index(xa, 12345678);
+	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+			-EEXIST);
+	XA_BUG_ON(xa, xa_empty(xa));
+	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* Can iterate through a reserved entry */
@@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
 	xa_reserve(xa, 6, GFP_KERNEL);
 	xa_store_index(xa, 7, GFP_KERNEL);
 
-	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+	xa_for_each(xa, index, entry) {
 		XA_BUG_ON(xa, index != 5 && index != 7);
 	}
 	xa_destroy(xa);
@@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa)
 static noinline void check_find_2(struct xarray *xa)
 {
 	void *entry;
-	unsigned long i, j, index = 0;
+	unsigned long i, j, index;
 
-	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+	xa_for_each(xa, index, entry) {
 		XA_BUG_ON(xa, true);
 	}
 
 	for (i = 0; i < 1024; i++) {
 		xa_store_index(xa, index, GFP_KERNEL);
 		j = 0;
-		index = 0;
-		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+		xa_for_each(xa, index, entry) {
 			XA_BUG_ON(xa, xa_mk_index(index) != entry);
 			XA_BUG_ON(xa, index != j++);
 		}
@@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa)
 	for (i = 0; i < 100; i++) {
 		for (j = 0; j < 100; j++) {
+			rcu_read_lock();
 			for (k = 0; k < 100; k++) {
 				xas_set(&xas, j);
 				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa)
 				XA_BUG_ON(xa,
 					xas.xa_node != XAS_RESTART);
 			}
+			rcu_read_unlock();
 		}
 		xa_store_index(xa, i, GFP_KERNEL);
 		xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa)
 	}
 }
 
+static void check_align_1(struct xarray *xa, char *name)
+{
+	int i;
+	unsigned int id;
+	unsigned long index;
+	void *entry;
+
+	for (i = 0; i < 8; i++) {
+		id = 0;
+		XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
+				!= 0);
+		XA_BUG_ON(xa, id != i);
+	}
+	xa_for_each(xa, index, entry)
+		XA_BUG_ON(xa, xa_is_err(entry));
+	xa_destroy(xa);
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+	char name[] = "Motorola 68000";
+
+	check_align_1(xa, name);
+	check_align_1(xa, name + 1);
+	check_align_1(xa, name + 2);
+	check_align_1(xa, name + 3);
+//	check_align_2(xa, name);
+}
+
 static LIST_HEAD(shadow_nodes);
 
 static void test_update_node(struct xa_node *node)
@@ -1332,6 +1364,7 @@ static int xarray_checks(void)
 	check_create_range(&array);
 	check_store_range(&array);
 	check_store_iter(&array);
+	check_align(&xa0);
 
 	check_workingset(&array, 0);
 	check_workingset(&array, 64);
......
@@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas)
 		if (xas->xa_shift > node->shift)
 			break;
 		entry = xas_descend(xas, node);
+		if (node->shift == 0)
+			break;
 	}
 	return entry;
 }
@@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
 	for (;;) {
 		void *entry = xa_entry_locked(xas->xa, node, offset);
 
-		if (xa_is_node(entry)) {
+		if (node->shift && xa_is_node(entry)) {
 			node = xa_to_node(entry);
 			offset = 0;
 			continue;
@@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head)
 /*
  * xas_create() - Create a slot to store an entry in.
  * @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
  *
  * Most users will not need to call this function directly, as it is called
  * by xas_store(). It is useful for doing conditional store operations
@@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head)
 * If the slot was newly created, returns %NULL. If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
-static void *xas_create(struct xa_state *xas)
+static void *xas_create(struct xa_state *xas, bool allow_root)
 {
 	struct xarray *xa = xas->xa;
 	void *entry;
@@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas)
 		shift = xas_expand(xas, entry);
 		if (shift < 0)
 			return NULL;
+		if (!shift && !allow_root)
+			shift = XA_CHUNK_SHIFT;
 		entry = xa_head_locked(xa);
 		slot = &xa->xa_head;
 	} else if (xas_error(xas)) {
@@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas)
 	xas->xa_sibs = 0;
 
 	for (;;) {
-		xas_create(xas);
+		xas_create(xas, true);
 		if (xas_error(xas))
 			goto restore;
 		if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry)
 	bool value = xa_is_value(entry);
 
 	if (entry)
-		first = xas_create(xas);
+		first = xas_create(xas, !xa_is_node(entry));
 	else
 		first = xas_load(xas);
@@ -1250,35 +1255,6 @@ void *xas_find_conflict(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_find_conflict);
 
-/**
- * xa_init_flags() - Initialise an empty XArray with flags.
- * @xa: XArray.
- * @flags: XA_FLAG values.
- *
- * If you need to initialise an XArray with special flags (eg you need
- * to take the lock from interrupt context), use this function instead
- * of xa_init().
- *
- * Context: Any context.
- */
-void xa_init_flags(struct xarray *xa, gfp_t flags)
-{
-	unsigned int lock_type;
-	static struct lock_class_key xa_lock_irq;
-	static struct lock_class_key xa_lock_bh;
-
-	spin_lock_init(&xa->xa_lock);
-	xa->xa_flags = flags;
-	xa->xa_head = NULL;
-
-	lock_type = xa_lock_type(xa);
-	if (lock_type == XA_LOCK_IRQ)
-		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
-	else if (lock_type == XA_LOCK_BH)
-		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
-}
-EXPORT_SYMBOL(xa_init_flags);
-
 /**
  * xa_load() - Load an entry from an XArray.
  * @xa: XArray.
@@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
 {
 	if (xa_is_zero(curr))
 		return NULL;
-	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
 	if (xas_error(xas))
 		curr = xas->xa_node;
 	return curr;
@@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 	XA_STATE(xas, xa, index);
 	void *curr;
 
-	if (WARN_ON_ONCE(xa_is_internal(entry)))
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return XA_ERROR(-EINVAL);
 	if (xa_track_free(xa) && !entry)
 		entry = XA_ZERO_ENTRY;
@@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 	XA_STATE(xas, xa, index);
 	void *curr;
 
-	if (WARN_ON_ONCE(xa_is_internal(entry)))
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return XA_ERROR(-EINVAL);
 	if (xa_track_free(xa) && !entry)
 		entry = XA_ZERO_ENTRY;
@@ -1464,6 +1439,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 EXPORT_SYMBOL(__xa_cmpxchg);
 
+/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
+		return -EINVAL;
+	if (!entry)
+		entry = XA_ZERO_ENTRY;
+
+	do {
+		curr = xas_load(&xas);
+		if (!curr) {
+			xas_store(&xas, entry);
+			if (xa_track_free(xa))
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		} else {
+			xas_set_err(&xas, -EEXIST);
+		}
+	} while (__xas_nomem(&xas, gfp));
+
+	return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
+
 /**
  * __xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
 			if (last + 1)
 				order = __ffs(last + 1);
 			xas_set_order(&xas, last, order);
-			xas_create(&xas);
+			xas_create(&xas, true);
 			if (xas_error(&xas))
 				goto unlock;
 		}
@@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
 	XA_STATE(xas, xa, 0);
 	int err;
 
-	if (WARN_ON_ONCE(xa_is_internal(entry)))
+	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
 	if (WARN_ON_ONCE(!xa_track_free(xa)))
 		return -EINVAL;
......