Commit aba16dc5 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax

Pull IDA updates from Matthew Wilcox:
 "A better IDA API:

      id = ida_alloc(ida, GFP_xxx);
      ida_free(ida, id);

  rather than the cumbersome ida_simple_get(), ida_simple_remove().

  The new IDA API is similar to ida_simple_get() but better named.  The
  internal restructuring of the IDA code removes the bitmap
  preallocation nonsense.

  I hope the net -200 lines of code is convincing"

* 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax: (29 commits)
  ida: Change ida_get_new_above to return the id
  ida: Remove old API
  test_ida: check_ida_destroy and check_ida_alloc
  test_ida: Convert check_ida_conv to new API
  test_ida: Move ida_check_max
  test_ida: Move ida_check_leaf
  idr-test: Convert ida_check_nomem to new API
  ida: Start new test_ida module
  target/iscsi: Allocate session IDs from an IDA
  iscsi target: fix session creation failure handling
  drm/vmwgfx: Convert to new IDA API
  dmaengine: Convert to new IDA API
  ppc: Convert vas ID allocation to new IDA API
  media: Convert entity ID allocation to new IDA API
  ppc: Convert mmu context allocation to new IDA API
  Convert net_namespace to new IDA API
  cb710: Convert to new IDA API
  rsxx: Convert to new IDA API
  osd: Convert to new IDA API
  sd: Convert to new IDA API
  ...
parents c4726e77 1df89519
...@@ -26,48 +26,16 @@ ...@@ -26,48 +26,16 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida); static DEFINE_IDA(mmu_context_ida);
static int alloc_context_id(int min_id, int max_id) static int alloc_context_id(int min_id, int max_id)
{ {
int index, err; return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
again:
if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
err = ida_get_new_above(&mmu_context_ida, min_id, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
goto again;
else if (err)
return err;
if (index > max_id) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
return -ENOMEM;
}
return index;
} }
void hash__reserve_context_id(int id) void hash__reserve_context_id(int id)
{ {
int rc, result = 0; int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
do {
if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
break;
spin_lock(&mmu_context_lock);
rc = ida_get_new_above(&mmu_context_ida, id, &result);
spin_unlock(&mmu_context_lock);
} while (rc == -EAGAIN);
WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result); WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
} }
...@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
void __destroy_context(int context_id) void __destroy_context(int context_id)
{ {
spin_lock(&mmu_context_lock); ida_free(&mmu_context_ida, context_id);
ida_remove(&mmu_context_ida, context_id);
spin_unlock(&mmu_context_lock);
} }
EXPORT_SYMBOL_GPL(__destroy_context); EXPORT_SYMBOL_GPL(__destroy_context);
...@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx) ...@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
{ {
int index, context_id; int index, context_id;
spin_lock(&mmu_context_lock);
for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
context_id = ctx->extended_id[index]; context_id = ctx->extended_id[index];
if (context_id) if (context_id)
ida_remove(&mmu_context_ida, context_id); ida_free(&mmu_context_ida, context_id);
} }
spin_unlock(&mmu_context_lock);
} }
static void pte_frag_destroy(void *pte_frag) static void pte_frag_destroy(void *pte_frag)
......
...@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) ...@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
return 0; return 0;
} }
static DEFINE_SPINLOCK(vas_ida_lock);
static void vas_release_window_id(struct ida *ida, int winid) static void vas_release_window_id(struct ida *ida, int winid)
{ {
spin_lock(&vas_ida_lock); ida_free(ida, winid);
ida_remove(ida, winid);
spin_unlock(&vas_ida_lock);
} }
static int vas_assign_window_id(struct ida *ida) static int vas_assign_window_id(struct ida *ida)
{ {
int rc, winid; int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
do {
rc = ida_pre_get(ida, GFP_KERNEL);
if (!rc)
return -EAGAIN;
spin_lock(&vas_ida_lock);
rc = ida_get_new(ida, &winid);
spin_unlock(&vas_ida_lock);
} while (rc == -EAGAIN);
if (rc)
return rc;
if (winid > VAS_WINDOWS_PER_CHIP) { if (winid == -ENOSPC) {
pr_err("Too many (%d) open windows\n", winid); pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
vas_release_window_id(ida, winid);
return -EAGAIN; return -EAGAIN;
} }
......
...@@ -118,7 +118,6 @@ static struct dentry *dfs_device_status; ...@@ -118,7 +118,6 @@ static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS]; static u32 cpu_use[NR_CPUS];
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida); static DEFINE_IDA(rssd_index_ida);
static int mtip_block_initialize(struct driver_data *dd); static int mtip_block_initialize(struct driver_data *dd);
...@@ -3767,20 +3766,10 @@ static int mtip_block_initialize(struct driver_data *dd) ...@@ -3767,20 +3766,10 @@ static int mtip_block_initialize(struct driver_data *dd)
goto alloc_disk_error; goto alloc_disk_error;
} }
/* Generate the disk name, implemented same as in sd.c */ rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
do { if (rv < 0)
if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
rv = -ENOMEM;
goto ida_get_error;
}
spin_lock(&rssd_index_lock);
rv = ida_get_new(&rssd_index_ida, &index);
spin_unlock(&rssd_index_lock);
} while (rv == -EAGAIN);
if (rv)
goto ida_get_error; goto ida_get_error;
index = rv;
rv = rssd_disk_name_format("rssd", rv = rssd_disk_name_format("rssd",
index, index,
...@@ -3922,9 +3911,7 @@ static int mtip_block_initialize(struct driver_data *dd) ...@@ -3922,9 +3911,7 @@ static int mtip_block_initialize(struct driver_data *dd)
block_queue_alloc_tag_error: block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd); mtip_hw_debugfs_exit(dd);
disk_index_error: disk_index_error:
spin_lock(&rssd_index_lock); ida_free(&rssd_index_ida, index);
ida_remove(&rssd_index_ida, index);
spin_unlock(&rssd_index_lock);
ida_get_error: ida_get_error:
put_disk(dd->disk); put_disk(dd->disk);
...@@ -4012,9 +3999,7 @@ static int mtip_block_remove(struct driver_data *dd) ...@@ -4012,9 +3999,7 @@ static int mtip_block_remove(struct driver_data *dd)
} }
dd->disk = NULL; dd->disk = NULL;
spin_lock(&rssd_index_lock); ida_free(&rssd_index_ida, dd->index);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
/* De-initialize the protocol layer. */ /* De-initialize the protocol layer. */
mtip_hw_exit(dd); mtip_hw_exit(dd);
...@@ -4054,9 +4039,7 @@ static int mtip_block_shutdown(struct driver_data *dd) ...@@ -4054,9 +4039,7 @@ static int mtip_block_shutdown(struct driver_data *dd)
dd->queue = NULL; dd->queue = NULL;
} }
spin_lock(&rssd_index_lock); ida_free(&rssd_index_ida, dd->index);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
return 0; return 0;
} }
......
...@@ -58,7 +58,6 @@ MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete " ...@@ -58,7 +58,6 @@ MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
"until the card startup has completed."); "until the card startup has completed.");
static DEFINE_IDA(rsxx_disk_ida); static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
/* --------------------Debugfs Setup ------------------- */ /* --------------------Debugfs Setup ------------------- */
...@@ -771,19 +770,10 @@ static int rsxx_pci_probe(struct pci_dev *dev, ...@@ -771,19 +770,10 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->dev = dev; card->dev = dev;
pci_set_drvdata(dev, card); pci_set_drvdata(dev, card);
do { st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) { if (st < 0)
st = -ENOMEM;
goto failed_ida_get;
}
spin_lock(&rsxx_ida_lock);
st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
spin_unlock(&rsxx_ida_lock);
} while (st == -EAGAIN);
if (st)
goto failed_ida_get; goto failed_ida_get;
card->disk_id = st;
st = pci_enable_device(dev); st = pci_enable_device(dev);
if (st) if (st)
...@@ -985,9 +975,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, ...@@ -985,9 +975,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
failed_dma_mask: failed_dma_mask:
pci_disable_device(dev); pci_disable_device(dev);
failed_enable: failed_enable:
spin_lock(&rsxx_ida_lock); ida_free(&rsxx_disk_ida, card->disk_id);
ida_remove(&rsxx_disk_ida, card->disk_id);
spin_unlock(&rsxx_ida_lock);
failed_ida_get: failed_ida_get:
kfree(card); kfree(card);
...@@ -1050,6 +1038,7 @@ static void rsxx_pci_remove(struct pci_dev *dev) ...@@ -1050,6 +1038,7 @@ static void rsxx_pci_remove(struct pci_dev *dev)
pci_disable_device(dev); pci_disable_device(dev);
pci_release_regions(dev); pci_release_regions(dev);
ida_free(&rsxx_disk_ida, card->disk_id);
kfree(card); kfree(card);
} }
......
...@@ -161,9 +161,7 @@ static void chan_dev_release(struct device *dev) ...@@ -161,9 +161,7 @@ static void chan_dev_release(struct device *dev)
chan_dev = container_of(dev, typeof(*chan_dev), device); chan_dev = container_of(dev, typeof(*chan_dev), device);
if (atomic_dec_and_test(chan_dev->idr_ref)) { if (atomic_dec_and_test(chan_dev->idr_ref)) {
mutex_lock(&dma_list_mutex); ida_free(&dma_ida, chan_dev->dev_id);
ida_remove(&dma_ida, chan_dev->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(chan_dev->idr_ref); kfree(chan_dev->idr_ref);
} }
kfree(chan_dev); kfree(chan_dev);
...@@ -898,17 +896,12 @@ static bool device_has_all_tx_types(struct dma_device *device) ...@@ -898,17 +896,12 @@ static bool device_has_all_tx_types(struct dma_device *device)
static int get_dma_id(struct dma_device *device) static int get_dma_id(struct dma_device *device)
{ {
int rc; int rc = ida_alloc(&dma_ida, GFP_KERNEL);
do {
if (!ida_pre_get(&dma_ida, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&dma_list_mutex);
rc = ida_get_new(&dma_ida, &device->dev_id);
mutex_unlock(&dma_list_mutex);
} while (rc == -EAGAIN);
if (rc < 0)
return rc; return rc;
device->dev_id = rc;
return 0;
} }
/** /**
...@@ -1092,9 +1085,7 @@ int dma_async_device_register(struct dma_device *device) ...@@ -1092,9 +1085,7 @@ int dma_async_device_register(struct dma_device *device)
err_out: err_out:
/* if we never registered a channel just release the idr */ /* if we never registered a channel just release the idr */
if (atomic_read(idr_ref) == 0) { if (atomic_read(idr_ref) == 0) {
mutex_lock(&dma_list_mutex); ida_free(&dma_ida, device->dev_id);
ida_remove(&dma_ida, device->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(idr_ref); kfree(idr_ref);
return rc; return rc;
} }
......
...@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, ...@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{ {
struct vmwgfx_gmrid_man *gman = struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv; (struct vmwgfx_gmrid_man *)man->priv;
int ret = 0;
int id; int id;
mem->mm_node = NULL; mem->mm_node = NULL;
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
spin_lock(&gman->lock); spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) { if (gman->max_gmr_pages > 0) {
gman->used_gmr_pages += bo->num_pages; gman->used_gmr_pages += bo->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto out_err_locked; goto nospace;
} }
do {
spin_unlock(&gman->lock);
if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
ret = -ENOMEM;
goto out_err;
}
spin_lock(&gman->lock);
ret = ida_get_new(&gman->gmr_ida, &id);
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
ret = 0;
goto out_err_locked;
}
} while (ret == -EAGAIN);
if (likely(ret == 0)) {
mem->mm_node = gman; mem->mm_node = gman;
mem->start = id; mem->start = id;
mem->num_pages = bo->num_pages; mem->num_pages = bo->num_pages;
} else
goto out_err_locked;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
return 0; return 0;
out_err: nospace:
spin_lock(&gman->lock);
out_err_locked:
gman->used_gmr_pages -= bo->num_pages; gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
return ret; ida_free(&gman->gmr_ida, id);
return 0;
} }
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
...@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, ...@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
(struct vmwgfx_gmrid_man *)man->priv; (struct vmwgfx_gmrid_man *)man->priv;
if (mem->mm_node) { if (mem->mm_node) {
ida_free(&gman->gmr_ida, mem->start);
spin_lock(&gman->lock); spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
gman->used_gmr_pages -= mem->num_pages; gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
mem->mm_node = NULL; mem->mm_node = NULL;
......
...@@ -585,18 +585,12 @@ int __must_check media_device_register_entity(struct media_device *mdev, ...@@ -585,18 +585,12 @@ int __must_check media_device_register_entity(struct media_device *mdev,
entity->num_links = 0; entity->num_links = 0;
entity->num_backlinks = 0; entity->num_backlinks = 0;
if (!ida_pre_get(&mdev->entity_internal_idx, GFP_KERNEL)) ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL);
return -ENOMEM; if (ret < 0)
mutex_lock(&mdev->graph_mutex);
ret = ida_get_new_above(&mdev->entity_internal_idx, 1,
&entity->internal_idx);
if (ret < 0) {
mutex_unlock(&mdev->graph_mutex);
return ret; return ret;
} entity->internal_idx = ret;
mutex_lock(&mdev->graph_mutex);
mdev->entity_internal_idx_max = mdev->entity_internal_idx_max =
max(mdev->entity_internal_idx_max, entity->internal_idx); max(mdev->entity_internal_idx_max, entity->internal_idx);
...@@ -642,7 +636,7 @@ static void __media_device_unregister_entity(struct media_entity *entity) ...@@ -642,7 +636,7 @@ static void __media_device_unregister_entity(struct media_entity *entity)
struct media_interface *intf; struct media_interface *intf;
unsigned int i; unsigned int i;
ida_simple_remove(&mdev->entity_internal_idx, entity->internal_idx); ida_free(&mdev->entity_internal_idx, entity->internal_idx);
/* Remove all interface links pointing to this entity */ /* Remove all interface links pointing to this entity */
list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <linux/gfp.h> #include <linux/gfp.h>
static DEFINE_IDA(cb710_ida); static DEFINE_IDA(cb710_ida);
static DEFINE_SPINLOCK(cb710_ida_lock);
void cb710_pci_update_config_reg(struct pci_dev *pdev, void cb710_pci_update_config_reg(struct pci_dev *pdev,
int reg, uint32_t mask, uint32_t xor) int reg, uint32_t mask, uint32_t xor)
...@@ -205,7 +204,6 @@ static int cb710_probe(struct pci_dev *pdev, ...@@ -205,7 +204,6 @@ static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
struct cb710_chip *chip; struct cb710_chip *chip;
unsigned long flags;
u32 val; u32 val;
int err; int err;
int n = 0; int n = 0;
...@@ -256,18 +254,10 @@ static int cb710_probe(struct pci_dev *pdev, ...@@ -256,18 +254,10 @@ static int cb710_probe(struct pci_dev *pdev,
if (err) if (err)
return err; return err;
do { err = ida_alloc(&cb710_ida, GFP_KERNEL);
if (!ida_pre_get(&cb710_ida, GFP_KERNEL)) if (err < 0)
return -ENOMEM;
spin_lock_irqsave(&cb710_ida_lock, flags);
err = ida_get_new(&cb710_ida, &chip->platform_id);
spin_unlock_irqrestore(&cb710_ida_lock, flags);
if (err && err != -EAGAIN)
return err; return err;
} while (err); chip->platform_id = err;
dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n", dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
chip->platform_id, chip->iobase, pdev->irq); chip->platform_id, chip->iobase, pdev->irq);
...@@ -308,7 +298,6 @@ static int cb710_probe(struct pci_dev *pdev, ...@@ -308,7 +298,6 @@ static int cb710_probe(struct pci_dev *pdev,
static void cb710_remove_one(struct pci_dev *pdev) static void cb710_remove_one(struct pci_dev *pdev)
{ {
struct cb710_chip *chip = pci_get_drvdata(pdev); struct cb710_chip *chip = pci_get_drvdata(pdev);
unsigned long flags;
cb710_unregister_slot(chip, CB710_SLOT_SM); cb710_unregister_slot(chip, CB710_SLOT_SM);
cb710_unregister_slot(chip, CB710_SLOT_MS); cb710_unregister_slot(chip, CB710_SLOT_MS);
...@@ -317,9 +306,7 @@ static void cb710_remove_one(struct pci_dev *pdev) ...@@ -317,9 +306,7 @@ static void cb710_remove_one(struct pci_dev *pdev)
BUG_ON(atomic_read(&chip->slot_refs_count) != 0); BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif #endif
spin_lock_irqsave(&cb710_ida_lock, flags); ida_free(&cb710_ida, chip->platform_id);
ida_remove(&cb710_ida, chip->platform_id);
spin_unlock_irqrestore(&cb710_ida_lock, flags);
} }
static const struct pci_device_id cb710_pci_tbl[] = { static const struct pci_device_id cb710_pci_tbl[] = {
......
...@@ -423,20 +423,12 @@ static int osd_probe(struct device *dev) ...@@ -423,20 +423,12 @@ static int osd_probe(struct device *dev)
if (scsi_device->type != TYPE_OSD) if (scsi_device->type != TYPE_OSD)
return -ENODEV; return -ENODEV;
do { minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL);
if (!ida_pre_get(&osd_minor_ida, GFP_KERNEL)) if (minor == -ENOSPC)
return -EBUSY;
if (minor < 0)
return -ENODEV; return -ENODEV;
error = ida_get_new(&osd_minor_ida, &minor);
} while (error == -EAGAIN);
if (error)
return error;
if (minor >= SCSI_OSD_MAX_MINOR) {
error = -EBUSY;
goto err_retract_minor;
}
error = -ENOMEM; error = -ENOMEM;
oud = kzalloc(sizeof(*oud), GFP_KERNEL); oud = kzalloc(sizeof(*oud), GFP_KERNEL);
if (NULL == oud) if (NULL == oud)
...@@ -499,7 +491,7 @@ static int osd_probe(struct device *dev) ...@@ -499,7 +491,7 @@ static int osd_probe(struct device *dev)
err_free_osd: err_free_osd:
put_device(&oud->class_dev); put_device(&oud->class_dev);
err_retract_minor: err_retract_minor:
ida_remove(&osd_minor_ida, minor); ida_free(&osd_minor_ida, minor);
return error; return error;
} }
...@@ -514,7 +506,7 @@ static int osd_remove(struct device *dev) ...@@ -514,7 +506,7 @@ static int osd_remove(struct device *dev)
} }
cdev_device_del(&oud->cdev, &oud->class_dev); cdev_device_del(&oud->cdev, &oud->class_dev);
ida_remove(&osd_minor_ida, oud->minor); ida_free(&osd_minor_ida, oud->minor);
put_device(&oud->class_dev); put_device(&oud->class_dev);
return 0; return 0;
......
...@@ -123,7 +123,6 @@ static void scsi_disk_release(struct device *cdev); ...@@ -123,7 +123,6 @@ static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
static void sd_print_result(const struct scsi_disk *, const char *, int); static void sd_print_result(const struct scsi_disk *, const char *, int);
static DEFINE_SPINLOCK(sd_index_lock);
static DEFINE_IDA(sd_index_ida); static DEFINE_IDA(sd_index_ida);
/* This semaphore is used to mediate the 0->1 reference get in the /* This semaphore is used to mediate the 0->1 reference get in the
...@@ -3340,16 +3339,8 @@ static int sd_probe(struct device *dev) ...@@ -3340,16 +3339,8 @@ static int sd_probe(struct device *dev)
if (!gd) if (!gd)
goto out_free; goto out_free;
do { index = ida_alloc(&sd_index_ida, GFP_KERNEL);
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) if (index < 0) {
goto out_put;
spin_lock(&sd_index_lock);
error = ida_get_new(&sd_index_ida, &index);
spin_unlock(&sd_index_lock);
} while (error == -EAGAIN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n"); sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
goto out_put; goto out_put;
} }
...@@ -3393,9 +3384,7 @@ static int sd_probe(struct device *dev) ...@@ -3393,9 +3384,7 @@ static int sd_probe(struct device *dev)
return 0; return 0;
out_free_index: out_free_index:
spin_lock(&sd_index_lock); ida_free(&sd_index_ida, index);
ida_remove(&sd_index_ida, index);
spin_unlock(&sd_index_lock);
out_put: out_put:
put_disk(gd); put_disk(gd);
out_free: out_free:
...@@ -3460,9 +3449,7 @@ static void scsi_disk_release(struct device *dev) ...@@ -3460,9 +3449,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk; struct gendisk *disk = sdkp->disk;
spin_lock(&sd_index_lock); ida_free(&sd_index_ida, sdkp->index);
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);
disk->private_data = NULL; disk->private_data = NULL;
put_disk(disk); put_disk(disk);
......
...@@ -57,9 +57,8 @@ static DEFINE_SPINLOCK(tiqn_lock); ...@@ -57,9 +57,8 @@ static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_MUTEX(np_lock); static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr; static struct idr tiqn_idr;
struct idr sess_idr; DEFINE_IDA(sess_ida);
struct mutex auth_id_lock; struct mutex auth_id_lock;
spinlock_t sess_idr_lock;
struct iscsit_global *iscsit_global; struct iscsit_global *iscsit_global;
...@@ -700,9 +699,7 @@ static int __init iscsi_target_init_module(void) ...@@ -700,9 +699,7 @@ static int __init iscsi_target_init_module(void)
spin_lock_init(&iscsit_global->ts_bitmap_lock); spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock); mutex_init(&auth_id_lock);
spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr); idr_init(&tiqn_idr);
idr_init(&sess_idr);
ret = target_register_template(&iscsi_ops); ret = target_register_template(&iscsi_ops);
if (ret) if (ret)
...@@ -4375,10 +4372,7 @@ int iscsit_close_session(struct iscsi_session *sess) ...@@ -4375,10 +4372,7 @@ int iscsit_close_session(struct iscsi_session *sess)
pr_debug("Decremented number of active iSCSI Sessions on" pr_debug("Decremented number of active iSCSI Sessions on"
" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
spin_lock(&sess_idr_lock); ida_free(&sess_ida, sess->session_index);
idr_remove(&sess_idr, sess->session_index);
spin_unlock(&sess_idr_lock);
kfree(sess->sess_ops); kfree(sess->sess_ops);
sess->sess_ops = NULL; sess->sess_ops = NULL;
spin_unlock_bh(&se_tpg->session_lock); spin_unlock_bh(&se_tpg->session_lock);
......
...@@ -55,9 +55,7 @@ extern struct kmem_cache *lio_ooo_cache; ...@@ -55,9 +55,7 @@ extern struct kmem_cache *lio_ooo_cache;
extern struct kmem_cache *lio_qr_cache; extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache; extern struct kmem_cache *lio_r2t_cache;
extern struct idr sess_idr; extern struct ida sess_ida;
extern struct mutex auth_id_lock; extern struct mutex auth_id_lock;
extern spinlock_t sess_idr_lock;
#endif /*** ISCSI_TARGET_H ***/ #endif /*** ISCSI_TARGET_H ***/
...@@ -336,22 +336,15 @@ static int iscsi_login_zero_tsih_s1( ...@@ -336,22 +336,15 @@ static int iscsi_login_zero_tsih_s1(
timer_setup(&sess->time2retain_timer, timer_setup(&sess->time2retain_timer,
iscsit_handle_time2retain_timeout, 0); iscsit_handle_time2retain_timeout, 0);
idr_preload(GFP_KERNEL); ret = ida_alloc(&sess_ida, GFP_KERNEL);
spin_lock_bh(&sess_idr_lock);
ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
if (ret >= 0)
sess->session_index = ret;
spin_unlock_bh(&sess_idr_lock);
idr_preload_end();
if (ret < 0) { if (ret < 0) {
pr_err("idr_alloc() for sess_idr failed\n"); pr_err("Session ID allocation failed %d\n", ret);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES); ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess); goto free_sess;
return -ENOMEM;
} }
sess->session_index = ret;
sess->creation_time = get_jiffies_64(); sess->creation_time = get_jiffies_64();
/* /*
* The FFP CmdSN window values will be allocated from the TPG's * The FFP CmdSN window values will be allocated from the TPG's
...@@ -365,20 +358,26 @@ static int iscsi_login_zero_tsih_s1( ...@@ -365,20 +358,26 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES); ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for" pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n"); " struct iscsi_sess_ops.\n");
kfree(sess); goto free_id;
return -ENOMEM;
} }
sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL); sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) { if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES); ISCSI_LOGIN_STATUS_NO_RESOURCES);
kfree(sess->sess_ops); goto free_ops;
kfree(sess);
return -ENOMEM;
} }
return 0; return 0;
free_ops:
kfree(sess->sess_ops);
free_id:
ida_free(&sess_ida, sess->session_index);
free_sess:
kfree(sess);
conn->sess = NULL;
return -ENOMEM;
} }
static int iscsi_login_zero_tsih_s2( static int iscsi_login_zero_tsih_s2(
...@@ -1161,13 +1160,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, ...@@ -1161,13 +1160,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
ISCSI_LOGIN_STATUS_INIT_ERR); ISCSI_LOGIN_STATUS_INIT_ERR);
if (!zero_tsih || !conn->sess) if (!zero_tsih || !conn->sess)
goto old_sess_out; goto old_sess_out;
if (conn->sess->se_sess)
transport_free_session(conn->sess->se_sess); transport_free_session(conn->sess->se_sess);
if (conn->sess->session_index != 0) { ida_free(&sess_ida, conn->sess->session_index);
spin_lock_bh(&sess_idr_lock);
idr_remove(&sess_idr, conn->sess->session_index);
spin_unlock_bh(&sess_idr_lock);
}
kfree(conn->sess->sess_ops); kfree(conn->sess->sess_ops);
kfree(conn->sess); kfree(conn->sess);
conn->sess = NULL; conn->sess = NULL;
......
...@@ -46,7 +46,7 @@ static int pty_limit = NR_UNIX98_PTY_DEFAULT; ...@@ -46,7 +46,7 @@ static int pty_limit = NR_UNIX98_PTY_DEFAULT;
static int pty_reserve = NR_UNIX98_PTY_RESERVE; static int pty_reserve = NR_UNIX98_PTY_RESERVE;
static int pty_limit_min; static int pty_limit_min;
static int pty_limit_max = INT_MAX; static int pty_limit_max = INT_MAX;
static int pty_count; static atomic_t pty_count = ATOMIC_INIT(0);
static struct ctl_table pty_table[] = { static struct ctl_table pty_table[] = {
{ {
...@@ -93,8 +93,6 @@ static struct ctl_table pty_root_table[] = { ...@@ -93,8 +93,6 @@ static struct ctl_table pty_root_table[] = {
{} {}
}; };
static DEFINE_MUTEX(allocated_ptys_lock);
struct pts_mount_opts { struct pts_mount_opts {
int setuid; int setuid;
int setgid; int setgid;
...@@ -533,44 +531,25 @@ static struct file_system_type devpts_fs_type = { ...@@ -533,44 +531,25 @@ static struct file_system_type devpts_fs_type = {
int devpts_new_index(struct pts_fs_info *fsi) int devpts_new_index(struct pts_fs_info *fsi)
{ {
int index; int index = -ENOSPC;
int ida_ret;
retry:
if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&allocated_ptys_lock);
if (pty_count >= (pty_limit -
(fsi->mount_opts.reserve ? 0 : pty_reserve))) {
mutex_unlock(&allocated_ptys_lock);
return -ENOSPC;
}
ida_ret = ida_get_new(&fsi->allocated_ptys, &index); if (atomic_inc_return(&pty_count) >= (pty_limit -
if (ida_ret < 0) { (fsi->mount_opts.reserve ? 0 : pty_reserve)))
mutex_unlock(&allocated_ptys_lock); goto out;
if (ida_ret == -EAGAIN)
goto retry;
return -EIO;
}
if (index >= fsi->mount_opts.max) { index = ida_alloc_max(&fsi->allocated_ptys, fsi->mount_opts.max - 1,
ida_remove(&fsi->allocated_ptys, index); GFP_KERNEL);
mutex_unlock(&allocated_ptys_lock);
return -ENOSPC; out:
} if (index < 0)
pty_count++; atomic_dec(&pty_count);
mutex_unlock(&allocated_ptys_lock);
return index; return index;
} }
void devpts_kill_index(struct pts_fs_info *fsi, int idx) void devpts_kill_index(struct pts_fs_info *fsi, int idx)
{ {
mutex_lock(&allocated_ptys_lock); ida_free(&fsi->allocated_ptys, idx);
ida_remove(&fsi->allocated_ptys, idx); atomic_dec(&pty_count);
pty_count--;
mutex_unlock(&allocated_ptys_lock);
} }
/** /**
......
...@@ -61,9 +61,6 @@ __setup("mphash_entries=", set_mphash_entries); ...@@ -61,9 +61,6 @@ __setup("mphash_entries=", set_mphash_entries);
static u64 event; static u64 event;
static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida); static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;
static struct hlist_head *mount_hashtable __read_mostly; static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly; static struct hlist_head *mountpoint_hashtable __read_mostly;
...@@ -101,50 +98,30 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry) ...@@ -101,50 +98,30 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
static int mnt_alloc_id(struct mount *mnt) static int mnt_alloc_id(struct mount *mnt)
{ {
int res; int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
retry:
ida_pre_get(&mnt_id_ida, GFP_KERNEL);
spin_lock(&mnt_id_lock);
res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
if (!res)
mnt_id_start = mnt->mnt_id + 1;
spin_unlock(&mnt_id_lock);
if (res == -EAGAIN)
goto retry;
if (res < 0)
return res; return res;
mnt->mnt_id = res;
return 0;
} }
static void mnt_free_id(struct mount *mnt) static void mnt_free_id(struct mount *mnt)
{ {
int id = mnt->mnt_id; ida_free(&mnt_id_ida, mnt->mnt_id);
spin_lock(&mnt_id_lock);
ida_remove(&mnt_id_ida, id);
if (mnt_id_start > id)
mnt_id_start = id;
spin_unlock(&mnt_id_lock);
} }
/* /*
* Allocate a new peer group ID * Allocate a new peer group ID
*
* mnt_group_ida is protected by namespace_sem
*/ */
static int mnt_alloc_group_id(struct mount *mnt) static int mnt_alloc_group_id(struct mount *mnt)
{ {
int res; int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
return -ENOMEM;
res = ida_get_new_above(&mnt_group_ida,
mnt_group_start,
&mnt->mnt_group_id);
if (!res)
mnt_group_start = mnt->mnt_group_id + 1;
if (res < 0)
return res; return res;
mnt->mnt_group_id = res;
return 0;
} }
/* /*
...@@ -152,10 +129,7 @@ static int mnt_alloc_group_id(struct mount *mnt) ...@@ -152,10 +129,7 @@ static int mnt_alloc_group_id(struct mount *mnt)
*/ */
void mnt_release_group_id(struct mount *mnt) void mnt_release_group_id(struct mount *mnt)
{ {
int id = mnt->mnt_group_id; ida_free(&mnt_group_ida, mnt->mnt_group_id);
ida_remove(&mnt_group_ida, id);
if (mnt_group_start > id)
mnt_group_start = id;
mnt->mnt_group_id = 0; mnt->mnt_group_id = 0;
} }
......
...@@ -981,58 +981,42 @@ void emergency_thaw_all(void) ...@@ -981,58 +981,42 @@ void emergency_thaw_all(void)
} }
} }
/*
* Unnamed block devices are dummy devices used by virtual
* filesystems which don't use real block-devices. -- jrs
*/
static DEFINE_IDA(unnamed_dev_ida); static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
* Always return at least 1 from get_anon_bdev.
*/
static int unnamed_dev_start = 1;
/**
* get_anon_bdev - Allocate a block device for filesystems which don't have one.
* @p: Pointer to a dev_t.
*
* Filesystems which don't use real block devices can call this function
* to allocate a virtual block device.
*
* Context: Any context. Frequently called while holding sb_lock.
* Return: 0 on success, -EMFILE if there are no anonymous bdevs left
* or -ENOMEM if memory allocation failed.
*/
int get_anon_bdev(dev_t *p) int get_anon_bdev(dev_t *p)
{ {
int dev; int dev;
int error;
retry: /*
if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0) * Many userspace utilities consider an FSID of 0 invalid.
return -ENOMEM; * Always return at least 1 from get_anon_bdev.
spin_lock(&unnamed_dev_lock); */
error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev); dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
if (!error) GFP_ATOMIC);
unnamed_dev_start = dev + 1; if (dev == -ENOSPC)
spin_unlock(&unnamed_dev_lock); dev = -EMFILE;
if (error == -EAGAIN) if (dev < 0)
/* We raced and lost with another CPU. */ return dev;
goto retry;
else if (error) *p = MKDEV(0, dev);
return -EAGAIN;
if (dev >= (1 << MINORBITS)) {
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, dev);
if (unnamed_dev_start > dev)
unnamed_dev_start = dev;
spin_unlock(&unnamed_dev_lock);
return -EMFILE;
}
*p = MKDEV(0, dev & MINORMASK);
return 0; return 0;
} }
EXPORT_SYMBOL(get_anon_bdev); EXPORT_SYMBOL(get_anon_bdev);
void free_anon_bdev(dev_t dev) void free_anon_bdev(dev_t dev)
{ {
int slot = MINOR(dev); ida_free(&unnamed_dev_ida, MINOR(dev));
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, slot);
if (slot < unnamed_dev_start)
unnamed_dev_start = slot;
spin_unlock(&unnamed_dev_lock);
} }
EXPORT_SYMBOL(free_anon_bdev); EXPORT_SYMBOL(free_anon_bdev);
...@@ -1040,7 +1024,6 @@ int set_anon_super(struct super_block *s, void *data) ...@@ -1040,7 +1024,6 @@ int set_anon_super(struct super_block *s, void *data)
{ {
return get_anon_bdev(&s->s_dev); return get_anon_bdev(&s->s_dev);
} }
EXPORT_SYMBOL(set_anon_super); EXPORT_SYMBOL(set_anon_super);
void kill_anon_super(struct super_block *sb) void kill_anon_super(struct super_block *sb)
...@@ -1049,7 +1032,6 @@ void kill_anon_super(struct super_block *sb) ...@@ -1049,7 +1032,6 @@ void kill_anon_super(struct super_block *sb)
generic_shutdown_super(sb); generic_shutdown_super(sb);
free_anon_bdev(dev); free_anon_bdev(dev);
} }
EXPORT_SYMBOL(kill_anon_super); EXPORT_SYMBOL(kill_anon_super);
void kill_litter_super(struct super_block *sb) void kill_litter_super(struct super_block *sb)
...@@ -1058,7 +1040,6 @@ void kill_litter_super(struct super_block *sb) ...@@ -1058,7 +1040,6 @@ void kill_litter_super(struct super_block *sb)
d_genocide(sb->s_root); d_genocide(sb->s_root);
kill_anon_super(sb); kill_anon_super(sb);
} }
EXPORT_SYMBOL(kill_litter_super); EXPORT_SYMBOL(kill_litter_super);
static int ns_test_super(struct super_block *sb, void *data) static int ns_test_super(struct super_block *sb, void *data)
......
...@@ -236,34 +236,74 @@ struct ida { ...@@ -236,34 +236,74 @@ struct ida {
} }
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) #define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_free(struct ida *, unsigned int id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida); void ida_destroy(struct ida *ida);
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, /**
gfp_t gfp_mask); * ida_alloc() - Allocate an unused ID.
void ida_simple_remove(struct ida *ida, unsigned int id); * @ida: IDA handle.
* @gfp: Memory allocation flags.
*
* Allocate an ID between 0 and %INT_MAX, inclusive.
*
* Context: Any context.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
return ida_alloc_range(ida, 0, ~0, gfp);
}
static inline void ida_init(struct ida *ida) /**
* ida_alloc_min() - Allocate an unused ID.
* @ida: IDA handle.
* @min: Lowest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between @min and %INT_MAX, inclusive.
*
* Context: Any context.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{ {
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); return ida_alloc_range(ida, min, ~0, gfp);
} }
/** /**
* ida_get_new - allocate new ID * ida_alloc_max() - Allocate an unused ID.
* @ida: idr handle * @ida: IDA handle.
* @p_id: pointer to the allocated handle * @max: Highest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between 0 and @max, inclusive.
* *
* Simple wrapper around ida_get_new_above() w/ @starting_id of zero. * Context: Any context.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/ */
static inline int ida_get_new(struct ida *ida, int *p_id) static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{ {
return ida_get_new_above(ida, 0, p_id); return ida_alloc_range(ida, 0, max, gfp);
} }
static inline void ida_init(struct ida *ida)
{
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}
#define ida_simple_get(ida, start, end, gfp) \
ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
static inline bool ida_is_empty(const struct ida *ida) static inline bool ida_is_empty(const struct ida *ida)
{ {
return radix_tree_empty(&ida->ida_rt); return radix_tree_empty(&ida->ida_rt);
} }
/* in lib/radix-tree.c */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
#endif /* __IDR_H__ */ #endif /* __IDR_H__ */
...@@ -1833,6 +1833,9 @@ config TEST_HASH ...@@ -1833,6 +1833,9 @@ config TEST_HASH
This is intended to help people writing architecture-specific This is intended to help people writing architecture-specific
optimized versions. If unsure, say N. optimized versions. If unsure, say N.
config TEST_IDA
tristate "Perform selftest on IDA functions"
config TEST_PARMAN config TEST_PARMAN
tristate "Perform selftest on priority array manager" tristate "Perform selftest on priority array manager"
depends on PARMAN depends on PARMAN
......
...@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o ...@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin CFLAGS_test_kasan.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
......
...@@ -317,18 +317,12 @@ EXPORT_SYMBOL(idr_replace); ...@@ -317,18 +317,12 @@ EXPORT_SYMBOL(idr_replace);
* bit per ID, and so is more space efficient than an IDR. To use an IDA, * bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure, * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call * then initialise it using ida_init()). To allocate a new ID, call
* ida_simple_get(). To free an ID, call ida_simple_remove(). * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
* To free an ID, call ida_free().
* *
* If you have more complex locking requirements, use a loop around * ida_destroy() can be used to dispose of an IDA without needing to
* ida_pre_get() and ida_get_new() to allocate a new ID. Then use * free the individual IDs in it. You can use ida_is_empty() to find
* ida_remove() to free an ID. You must make sure that ida_get_new() and * out whether the IDA has any IDs currently allocated.
* ida_remove() cannot be called at the same time as each other for the
* same IDA.
*
* You can also use ida_get_new_above() if you need an ID to be allocated
* above a particular number. ida_destroy() can be used to dispose of an
* IDA without needing to free the individual IDs in it. You can use
* ida_is_empty() to find out whether the IDA has any IDs currently allocated.
* *
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum. * limitation, it should be quite straightforward to raise the maximum.
...@@ -369,25 +363,7 @@ EXPORT_SYMBOL(idr_replace); ...@@ -369,25 +363,7 @@ EXPORT_SYMBOL(idr_replace);
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1) #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
/** static int ida_get_new_above(struct ida *ida, int start)
* ida_get_new_above - allocate new ID above or equal to a start id
* @ida: ida handle
* @start: id to start search at
* @id: pointer to the allocated handle
*
* Allocate new ID above or equal to @start. It should be called
* with any required locks to ensure that concurrent calls to
* ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
* Consider using ida_simple_get() if you do not have complex locking
* requirements.
*
* If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
* return %-ENOSPC. On success, it will return 0.
*
* @id returns a value in the range @start ... %0x7fffffff.
*/
int ida_get_new_above(struct ida *ida, int start, int *id)
{ {
struct radix_tree_root *root = &ida->ida_rt; struct radix_tree_root *root = &ida->ida_rt;
void __rcu **slot; void __rcu **slot;
...@@ -426,8 +402,8 @@ int ida_get_new_above(struct ida *ida, int start, int *id) ...@@ -426,8 +402,8 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
if (ebit < BITS_PER_LONG) { if (ebit < BITS_PER_LONG) {
tmp |= 1UL << ebit; tmp |= 1UL << ebit;
rcu_assign_pointer(*slot, (void *)tmp); rcu_assign_pointer(*slot, (void *)tmp);
*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT; return new + ebit -
return 0; RADIX_TREE_EXCEPTIONAL_SHIFT;
} }
bitmap = this_cpu_xchg(ida_bitmap, NULL); bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap) if (!bitmap)
...@@ -458,8 +434,7 @@ int ida_get_new_above(struct ida *ida, int start, int *id) ...@@ -458,8 +434,7 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
RADIX_TREE_EXCEPTIONAL_ENTRY); RADIX_TREE_EXCEPTIONAL_ENTRY);
radix_tree_iter_replace(root, &iter, slot, radix_tree_iter_replace(root, &iter, slot,
bitmap); bitmap);
*id = new; return new;
return 0;
} }
bitmap = this_cpu_xchg(ida_bitmap, NULL); bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap) if (!bitmap)
...@@ -468,20 +443,11 @@ int ida_get_new_above(struct ida *ida, int start, int *id) ...@@ -468,20 +443,11 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
radix_tree_iter_replace(root, &iter, slot, bitmap); radix_tree_iter_replace(root, &iter, slot, bitmap);
} }
*id = new; return new;
return 0;
} }
} }
EXPORT_SYMBOL(ida_get_new_above);
/** static void ida_remove(struct ida *ida, int id)
* ida_remove - Free the given ID
* @ida: ida handle
* @id: ID to free
*
* This function should not be called at the same time as ida_get_new_above().
*/
void ida_remove(struct ida *ida, int id)
{ {
unsigned long index = id / IDA_BITMAP_BITS; unsigned long index = id / IDA_BITMAP_BITS;
unsigned offset = id % IDA_BITMAP_BITS; unsigned offset = id % IDA_BITMAP_BITS;
...@@ -518,99 +484,90 @@ void ida_remove(struct ida *ida, int id) ...@@ -518,99 +484,90 @@ void ida_remove(struct ida *ida, int id)
} }
return; return;
err: err:
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
} }
EXPORT_SYMBOL(ida_remove);
/** /**
* ida_destroy - Free the contents of an ida * ida_destroy() - Free all IDs.
* @ida: ida handle * @ida: IDA handle.
*
* Calling this function frees all IDs and releases all resources used
* by an IDA. When this call returns, the IDA is empty and can be reused
* or freed. If the IDA is already empty, there is no need to call this
* function.
* *
* Calling this function releases all resources associated with an IDA. When * Context: Any context.
* this call returns, the IDA is empty and can be reused or freed. The caller
* should not allow ida_remove() or ida_get_new_above() to be called at the
* same time.
*/ */
void ida_destroy(struct ida *ida) void ida_destroy(struct ida *ida)
{ {
unsigned long flags;
struct radix_tree_iter iter; struct radix_tree_iter iter;
void __rcu **slot; void __rcu **slot;
xa_lock_irqsave(&ida->ida_rt, flags);
radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
if (!radix_tree_exception(bitmap)) if (!radix_tree_exception(bitmap))
kfree(bitmap); kfree(bitmap);
radix_tree_iter_delete(&ida->ida_rt, &iter, slot); radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
} }
xa_unlock_irqrestore(&ida->ida_rt, flags);
} }
EXPORT_SYMBOL(ida_destroy); EXPORT_SYMBOL(ida_destroy);
/** /**
* ida_simple_get - get a new id. * ida_alloc_range() - Allocate an unused ID.
* @ida: the (initialized) ida. * @ida: IDA handle.
* @start: the minimum id (inclusive, < 0x8000000) * @min: Lowest ID to allocate.
* @end: the maximum id (exclusive, < 0x8000000 or 0) * @max: Highest ID to allocate.
* @gfp_mask: memory allocation flags * @gfp: Memory allocation flags.
*
* Allocates an id in the range start <= id < end, or returns -ENOSPC.
* On memory allocation failure, returns -ENOMEM.
* *
* Compared to ida_get_new_above() this function does its own locking, and * Allocate an ID between @min and @max, inclusive. The allocated ID will
* should be used unless there are special requirements. * not exceed %INT_MAX, even if @max is larger.
* *
* Use ida_simple_remove() to get rid of an id. * Context: Any context.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/ */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
gfp_t gfp_mask) gfp_t gfp)
{ {
int ret, id; int id = 0;
unsigned int max;
unsigned long flags; unsigned long flags;
BUG_ON((int)start < 0); if ((int)min < 0)
BUG_ON((int)end < 0); return -ENOSPC;
if (end == 0) if ((int)max < 0)
max = 0x80000000; max = INT_MAX;
else {
BUG_ON(end < start);
max = end - 1;
}
again: again:
if (!ida_pre_get(ida, gfp_mask))
return -ENOMEM;
xa_lock_irqsave(&ida->ida_rt, flags); xa_lock_irqsave(&ida->ida_rt, flags);
ret = ida_get_new_above(ida, start, &id); id = ida_get_new_above(ida, min);
if (!ret) { if (id > (int)max) {
if (id > max) {
ida_remove(ida, id); ida_remove(ida, id);
ret = -ENOSPC; id = -ENOSPC;
} else {
ret = id;
}
} }
xa_unlock_irqrestore(&ida->ida_rt, flags); xa_unlock_irqrestore(&ida->ida_rt, flags);
if (unlikely(ret == -EAGAIN)) if (unlikely(id == -EAGAIN)) {
if (!ida_pre_get(ida, gfp))
return -ENOMEM;
goto again; goto again;
}
return ret; return id;
} }
EXPORT_SYMBOL(ida_simple_get); EXPORT_SYMBOL(ida_alloc_range);
/** /**
* ida_simple_remove - remove an allocated id. * ida_free() - Release an allocated ID.
* @ida: the (initialized) ida. * @ida: IDA handle.
* @id: the id returned by ida_simple_get. * @id: Previously allocated ID.
*
* Use to release an id allocated with ida_simple_get().
* *
* Compared to ida_remove() this function does its own locking, and should be * Context: Any context.
* used unless there are special requirements.
*/ */
void ida_simple_remove(struct ida *ida, unsigned int id) void ida_free(struct ida *ida, unsigned int id)
{ {
unsigned long flags; unsigned long flags;
...@@ -619,4 +576,4 @@ void ida_simple_remove(struct ida *ida, unsigned int id) ...@@ -619,4 +576,4 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
ida_remove(ida, id); ida_remove(ida, id);
xa_unlock_irqrestore(&ida->ida_rt, flags); xa_unlock_irqrestore(&ida->ida_rt, flags);
} }
EXPORT_SYMBOL(ida_simple_remove); EXPORT_SYMBOL(ida_free);
...@@ -120,7 +120,7 @@ bool is_sibling_entry(const struct radix_tree_node *parent, void *node) ...@@ -120,7 +120,7 @@ bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
static inline unsigned long static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{ {
return slot - parent->slots; return parent ? slot - parent->slots : 0;
} }
static unsigned int radix_tree_descend(const struct radix_tree_node *parent, static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
...@@ -2106,14 +2106,6 @@ void idr_preload(gfp_t gfp_mask) ...@@ -2106,14 +2106,6 @@ void idr_preload(gfp_t gfp_mask)
} }
EXPORT_SYMBOL(idr_preload); EXPORT_SYMBOL(idr_preload);
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
* @gfp: memory allocation flags
*
* This function should be called before calling ida_get_new_above(). If it
* is unable to allocate memory, it will return %0. On success, it returns %1.
*/
int ida_pre_get(struct ida *ida, gfp_t gfp) int ida_pre_get(struct ida *ida, gfp_t gfp)
{ {
/* /*
...@@ -2134,7 +2126,6 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) ...@@ -2134,7 +2126,6 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
return 1; return 1;
} }
EXPORT_SYMBOL(ida_pre_get);
void __rcu **idr_get_free(struct radix_tree_root *root, void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp, struct radix_tree_iter *iter, gfp_t gfp,
......
// SPDX-License-Identifier: GPL-2.0+
/*
* test_ida.c: Test the IDA API
* Copyright (c) 2016-2018 Microsoft Corporation
* Copyright (c) 2018 Oracle Corporation
* Author: Matthew Wilcox <willy@infradead.org>
*/
#include <linux/idr.h>
#include <linux/module.h>
static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
if (x) { \
ida_dump(ida); \
dump_stack(); \
} else { \
tests_passed++; \
} \
} while (0)
/*
* Straightforward checks that allocating and freeing IDs work.
*/
static void ida_check_alloc(struct ida *ida)
{
int i, id;
for (i = 0; i < 10000; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
ida_free(ida, 20);
ida_free(ida, 21);
for (i = 0; i < 3; i++) {
id = ida_alloc(ida, GFP_KERNEL);
IDA_BUG_ON(ida, id < 0);
if (i == 2)
IDA_BUG_ON(ida, id != 10000);
}
for (i = 0; i < 5000; i++)
ida_free(ida, i);
IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/* Destroy an IDA with a single entry at @base */
static void ida_check_destroy_1(struct ida *ida, unsigned int base)
{
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
IDA_BUG_ON(ida, ida_is_empty(ida));
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/* Check that ida_destroy and ida_is_empty work */
static void ida_check_destroy(struct ida *ida)
{
/* Destroy an already-empty IDA */
IDA_BUG_ON(ida, !ida_is_empty(ida));
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
ida_check_destroy_1(ida, 0);
ida_check_destroy_1(ida, 1);
ida_check_destroy_1(ida, 1023);
ida_check_destroy_1(ida, 1024);
ida_check_destroy_1(ida, 12345678);
}
/*
* Check what happens when we fill a leaf and then delete it. This may
* discover mishandling of IDR_FREE.
*/
static void ida_check_leaf(struct ida *ida, unsigned int base)
{
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS; i++) {
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
base + i);
}
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
IDA_BUG_ON(ida, ida_is_empty(ida));
ida_free(ida, 0);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/*
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
* Allocating up to 2^31-1 should succeed, and then allocating the next one
* should fail.
*/
static void ida_check_max(struct ida *ida)
{
unsigned long i, j;
for (j = 1; j < 65537; j *= 2) {
unsigned long base = (1UL << 31) - j;
for (i = 0; i < j; i++) {
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
base + i);
}
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
-ENOSPC);
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
}
/*
* Check handling of conversions between exceptional entries and full bitmaps.
*/
static void ida_check_conv(struct ida *ida)
{
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
GFP_KERNEL) != i + BITS_PER_LONG);
ida_free(ida, i + 1);
ida_free(ida, i + BITS_PER_LONG);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
ida_free(ida, i - 1);
IDA_BUG_ON(ida, !ida_is_empty(ida));
for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
ida_free(ida, i - 1);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
static int ida_checks(void)
{
DEFINE_IDA(ida);
IDA_BUG_ON(&ida, !ida_is_empty(&ida));
ida_check_alloc(&ida);
ida_check_destroy(&ida);
ida_check_leaf(&ida, 0);
ida_check_leaf(&ida, 1024);
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
}
static void ida_exit(void)
{
}
module_init(ida_checks);
module_exit(ida_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");
...@@ -1001,22 +1001,18 @@ static int register_pernet_operations(struct list_head *list, ...@@ -1001,22 +1001,18 @@ static int register_pernet_operations(struct list_head *list,
int error; int error;
if (ops->id) { if (ops->id) {
again: error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id); GFP_KERNEL);
if (error < 0) { if (error < 0)
if (error == -EAGAIN) {
ida_pre_get(&net_generic_ids, GFP_KERNEL);
goto again;
}
return error; return error;
} *ops->id = error;
max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1); max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
} }
error = __register_pernet_operations(list, ops); error = __register_pernet_operations(list, ops);
if (error) { if (error) {
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)
ida_remove(&net_generic_ids, *ops->id); ida_free(&net_generic_ids, *ops->id);
} }
return error; return error;
...@@ -1027,7 +1023,7 @@ static void unregister_pernet_operations(struct pernet_operations *ops) ...@@ -1027,7 +1023,7 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
__unregister_pernet_operations(ops); __unregister_pernet_operations(ops);
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)
ida_remove(&net_generic_ids, *ops->id); ida_free(&net_generic_ids, *ops->id);
} }
/** /**
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder TARGETS = main idr-test multiorder
CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
...@@ -21,6 +22,7 @@ targets: generated/map-shift.h $(TARGETS) ...@@ -21,6 +22,7 @@ targets: generated/map-shift.h $(TARGETS)
main: $(OFILES) main: $(OFILES)
idr-test.o: ../../../lib/test_ida.c
idr-test: idr-test.o $(CORE_OFILES) idr-test: idr-test.o $(CORE_OFILES)
multiorder: multiorder.o $(CORE_OFILES) multiorder: multiorder.o $(CORE_OFILES)
......
...@@ -309,141 +309,61 @@ void idr_checks(void) ...@@ -309,141 +309,61 @@ void idr_checks(void)
idr_u32_test(0); idr_u32_test(0);
} }
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
void ida_dump(struct ida *);
#include "../../../lib/test_ida.c"
/* /*
* Check that we get the correct error when we run out of memory doing * Check that we get the correct error when we run out of memory doing
* allocations. To ensure we run out of memory, just "forget" to preload. * allocations. In userspace, GFP_NOWAIT will always fail an allocation.
* The first test is for not having a bitmap available, and the second test * The first test is for not having a bitmap available, and the second test
* is for not being able to allocate a level of the radix tree. * is for not being able to allocate a level of the radix tree.
*/ */
void ida_check_nomem(void) void ida_check_nomem(void)
{
DEFINE_IDA(ida);
int id, err;
err = ida_get_new_above(&ida, 256, &id);
assert(err == -EAGAIN);
err = ida_get_new_above(&ida, 1UL << 30, &id);
assert(err == -EAGAIN);
}
/*
* Check what happens when we fill a leaf and then delete it. This may
* discover mishandling of IDR_FREE.
*/
void ida_check_leaf(void)
{ {
DEFINE_IDA(ida); DEFINE_IDA(ida);
int id; int id;
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS; i++) { id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
assert(ida_pre_get(&ida, GFP_KERNEL)); IDA_BUG_ON(&ida, id != -ENOMEM);
assert(!ida_get_new(&ida, &id)); id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
assert(id == i); IDA_BUG_ON(&ida, id != -ENOMEM);
} IDA_BUG_ON(&ida, !ida_is_empty(&ida));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == 0);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
} }
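In this userspace harness a GFP_NOWAIT allocation always fails, which is how the test above forces both -ENOMEM cases (no bitmap available, and no radix-tree node available). In the kernel an atomic allocation can fail the same way, so callers only need to check the sign of the return value. A minimal hedged sketch (get_tag() and tags are invented names), mirroring the fallback pattern ida_check_conv_user() uses below:

/* Try an atomic allocation first, fall back to a sleeping one. */
static int get_tag(struct ida *tags)
{
	int id = ida_alloc(tags, GFP_NOWAIT);

	if (id == -ENOMEM)
		id = ida_alloc(tags, GFP_KERNEL);	/* may sleep */
	return id;	/* >= 0 on success, negative errno otherwise */
}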
/* /*
* Check handling of conversions between exceptional entries and full bitmaps. * Check handling of conversions between exceptional entries and full bitmaps.
*/ */
void ida_check_conv(void) void ida_check_conv_user(void)
{ {
DEFINE_IDA(ida); DEFINE_IDA(ida);
int id;
unsigned long i; unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, i + 1, &id));
assert(id == i + 1);
assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
assert(id == i + BITS_PER_LONG);
ida_remove(&ida, i + 1);
ida_remove(&ida, i + BITS_PER_LONG);
assert(ida_is_empty(&ida));
}
assert(ida_pre_get(&ida, GFP_KERNEL));
for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
ida_remove(&ida, i - 1);
}
assert(ida_is_empty(&ida));
for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
ida_remove(&ida, i - 1);
}
assert(ida_is_empty(&ida));
radix_tree_cpu_dead(1); radix_tree_cpu_dead(1);
for (i = 0; i < 1000000; i++) { for (i = 0; i < 1000000; i++) {
int err = ida_get_new(&ida, &id); int id = ida_alloc(&ida, GFP_NOWAIT);
if (err == -EAGAIN) { if (id == -ENOMEM) {
assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2)); IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) !=
assert(ida_pre_get(&ida, GFP_KERNEL)); BITS_PER_LONG - 2);
err = ida_get_new(&ida, &id); id = ida_alloc(&ida, GFP_KERNEL);
} else { } else {
assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2)); IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
} BITS_PER_LONG - 2);
assert(!err);
assert(id == i);
} }
ida_destroy(&ida); IDA_BUG_ON(&ida, id != i);
}
/*
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
* Allocating up to 2^31-1 should succeed, and then allocating the next one
* should fail.
*/
void ida_check_max(void)
{
DEFINE_IDA(ida);
int id, err;
unsigned long i, j;
for (j = 1; j < 65537; j *= 2) {
unsigned long base = (1UL << 31) - j;
for (i = 0; i < j; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, base, &id));
assert(id == base + i);
} }
assert(ida_pre_get(&ida, GFP_KERNEL));
err = ida_get_new_above(&ida, base, &id);
assert(err == -ENOSPC);
ida_destroy(&ida); ida_destroy(&ida);
assert(ida_is_empty(&ida));
rcu_barrier();
}
} }
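The conversion point the loop above keys on can be made concrete: a fresh IDA keeps the first BITS_PER_LONG - 2 IDs of a chunk in the radix-tree entry itself, so on a 64-bit build IDs 0-61 need no memory and the allocation at offset BITS_PER_LONG - 2 is the one that must allocate a struct ida_bitmap, which is exactly where the GFP_NOWAIT attempt is allowed to return -ENOMEM. A hedged sketch of that first chunk (conv_demo and the function name are invented, and it assumes BITS_PER_LONG == 64):

static DEFINE_IDA(conv_demo);

static void conv_demo_first_chunk(void)
{
	int i, id;

	/* IDs 0..61 fit in the inline (exceptional) entry: no allocation. */
	for (i = 0; i < BITS_PER_LONG - 2; i++)
		WARN_ON(ida_alloc(&conv_demo, GFP_NOWAIT) != i);

	/* ID 62 converts the entry to a struct ida_bitmap, so an atomic
	 * attempt may fail and a sleeping retry is needed. */
	id = ida_alloc(&conv_demo, GFP_NOWAIT);
	if (id == -ENOMEM)
		id = ida_alloc(&conv_demo, GFP_KERNEL);
	WARN_ON(id != BITS_PER_LONG - 2);

	ida_destroy(&conv_demo);
}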
void ida_check_random(void) void ida_check_random(void)
{ {
DEFINE_IDA(ida); DEFINE_IDA(ida);
DECLARE_BITMAP(bitmap, 2048); DECLARE_BITMAP(bitmap, 2048);
int id, err;
unsigned int i; unsigned int i;
time_t s = time(NULL); time_t s = time(NULL);
...@@ -454,15 +374,11 @@ void ida_check_random(void) ...@@ -454,15 +374,11 @@ void ida_check_random(void)
int bit = i & 2047; int bit = i & 2047;
if (test_bit(bit, bitmap)) { if (test_bit(bit, bitmap)) {
__clear_bit(bit, bitmap); __clear_bit(bit, bitmap);
ida_remove(&ida, bit); ida_free(&ida, bit);
} else { } else {
__set_bit(bit, bitmap); __set_bit(bit, bitmap);
do { IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
ida_pre_get(&ida, GFP_KERNEL); != bit);
err = ida_get_new_above(&ida, bit, &id);
} while (err == -EAGAIN);
assert(!err);
assert(id == bit);
} }
} }
ida_destroy(&ida); ida_destroy(&ida);
...@@ -488,71 +404,12 @@ void ida_simple_get_remove_test(void) ...@@ -488,71 +404,12 @@ void ida_simple_get_remove_test(void)
ida_destroy(&ida); ida_destroy(&ida);
} }
void ida_checks(void) void user_ida_checks(void)
{ {
DEFINE_IDA(ida);
int id;
unsigned long i;
radix_tree_cpu_dead(1); radix_tree_cpu_dead(1);
ida_check_nomem();
for (i = 0; i < 10000; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
assert(id == i);
}
ida_remove(&ida, 20);
ida_remove(&ida, 21);
for (i = 0; i < 3; i++) {
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new(&ida, &id));
if (i == 2)
assert(id == 10000);
}
for (i = 0; i < 5000; i++)
ida_remove(&ida, i);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 5000, &id));
assert(id == 10001);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
ida_remove(&ida, id);
assert(ida_is_empty(&ida));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
ida_destroy(&ida);
assert(ida_is_empty(&ida));
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1, &id));
assert(id == 1);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 1025, &id));
assert(id == 1025);
assert(ida_pre_get(&ida, GFP_KERNEL));
assert(!ida_get_new_above(&ida, 10000, &id));
assert(id == 10000);
ida_remove(&ida, 1025);
ida_destroy(&ida);
assert(ida_is_empty(&ida));
ida_check_leaf(); ida_check_nomem();
ida_check_max(); ida_check_conv_user();
ida_check_conv();
ida_check_random(); ida_check_random();
ida_simple_get_remove_test(); ida_simple_get_remove_test();
...@@ -582,12 +439,19 @@ void ida_thread_tests(void) ...@@ -582,12 +439,19 @@ void ida_thread_tests(void)
pthread_join(threads[i], NULL); pthread_join(threads[i], NULL);
} }
void ida_tests(void)
{
user_ida_checks();
ida_checks();
ida_exit();
ida_thread_tests();
}
int __weak main(void) int __weak main(void)
{ {
radix_tree_init(); radix_tree_init();
idr_checks(); idr_checks();
ida_checks(); ida_tests();
ida_thread_tests();
radix_tree_cpu_dead(1); radix_tree_cpu_dead(1);
rcu_barrier(); rcu_barrier();
if (nr_allocated) if (nr_allocated)
......
#include "generated/map-shift.h"
#include "../../../../include/linux/xarray.h"
...@@ -27,20 +27,22 @@ void __gang_check(unsigned long middle, long down, long up, int chunk, int hop) ...@@ -27,20 +27,22 @@ void __gang_check(unsigned long middle, long down, long up, int chunk, int hop)
item_check_present(&tree, middle + idx); item_check_present(&tree, middle + idx);
item_check_absent(&tree, middle + up); item_check_absent(&tree, middle + up);
item_gang_check_present(&tree, middle - down, if (chunk > 0) {
up + down, chunk, hop); item_gang_check_present(&tree, middle - down, up + down,
chunk, hop);
item_full_scan(&tree, middle - down, down + up, chunk); item_full_scan(&tree, middle - down, down + up, chunk);
}
item_kill_tree(&tree); item_kill_tree(&tree);
} }
void gang_check(void) void gang_check(void)
{ {
__gang_check(1 << 30, 128, 128, 35, 2); __gang_check(1UL << 30, 128, 128, 35, 2);
__gang_check(1 << 31, 128, 128, 32, 32); __gang_check(1UL << 31, 128, 128, 32, 32);
__gang_check(1 << 31, 128, 128, 32, 100); __gang_check(1UL << 31, 128, 128, 32, 100);
__gang_check(1 << 31, 128, 128, 17, 7); __gang_check(1UL << 31, 128, 128, 17, 7);
__gang_check(0xffff0000, 0, 65536, 17, 7); __gang_check(0xffff0000UL, 0, 65536, 17, 7);
__gang_check(0xfffffffe, 1, 1, 17, 7); __gang_check(0xfffffffeUL, 1, 1, 17, 7);
} }
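The index constants in gang_check() also pick up UL suffixes: with a 32-bit int, 1 << 31 shifts into the sign bit, which is undefined behaviour and is the kind of thing the -fsanitize=undefined build added to the test-suite Makefile above would flag. A tiny illustration, not part of the patch:

/* 1UL keeps the shift in unsigned long arithmetic. */
static unsigned long bit31(void)
{
	return 1UL << 31;		/* well-defined: 0x80000000 */
	/* return 1 << 31; */		/* undefined when int is 32 bits wide */
}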
void __big_gang_check(void) void __big_gang_check(void)
...@@ -322,7 +324,7 @@ static void single_thread_tests(bool long_run) ...@@ -322,7 +324,7 @@ static void single_thread_tests(bool long_run)
printv(2, "after dynamic_height_check: %d allocated, preempt %d\n", printv(2, "after dynamic_height_check: %d allocated, preempt %d\n",
nr_allocated, preempt_count); nr_allocated, preempt_count);
idr_checks(); idr_checks();
ida_checks(); ida_tests();
rcu_barrier(); rcu_barrier();
printv(2, "after idr_checks: %d allocated, preempt %d\n", printv(2, "after idr_checks: %d allocated, preempt %d\n",
nr_allocated, preempt_count); nr_allocated, preempt_count);
...@@ -369,7 +371,6 @@ int main(int argc, char **argv) ...@@ -369,7 +371,6 @@ int main(int argc, char **argv)
iteration_test(0, 10 + 90 * long_run); iteration_test(0, 10 + 90 * long_run);
iteration_test(7, 10 + 90 * long_run); iteration_test(7, 10 + 90 * long_run);
single_thread_tests(long_run); single_thread_tests(long_run);
ida_thread_tests();
/* Free any remaining preallocated nodes */ /* Free any remaining preallocated nodes */
radix_tree_cpu_dead(0); radix_tree_cpu_dead(0);
......
...@@ -39,8 +39,7 @@ void multiorder_checks(void); ...@@ -39,8 +39,7 @@ void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration); void iteration_test(unsigned order, unsigned duration);
void benchmark(void); void benchmark(void);
void idr_checks(void); void idr_checks(void);
void ida_checks(void); void ida_tests(void);
void ida_thread_tests(void);
struct item * struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
......