Commit fb7ffeb1 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

parents 69eebed2 95ed644f
@@ -3163,22 +3163,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
-	struct ib_device_attr *device_attr;
-	__be64 guid;
-	int ret;
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr)
-		return 0;
-	ret = ib_query_device(device, device_attr);
-	guid = ret ? 0 : device_attr->node_guid;
-	kfree(device_attr);
-	return guid;
-}
 static void cm_add_one(struct ib_device *device)
 {
 	struct cm_device *cm_dev;
@@ -3200,9 +3184,7 @@ static void cm_add_one(struct ib_device *device)
 		return;
 	cm_dev->device = device;
-	cm_dev->ca_guid = cm_get_ca_guid(device);
-	if (!cm_dev->ca_guid)
-		goto error1;
+	cm_dev->ca_guid = device->node_guid;
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= device->phys_port_cnt; i++) {
@@ -3217,11 +3199,11 @@ static void cm_add_one(struct ib_device *device)
 					cm_recv_handler,
 					port);
 		if (IS_ERR(port->mad_agent))
-			goto error2;
+			goto error1;
 		ret = ib_modify_port(device, i, 0, &port_modify);
 		if (ret)
-			goto error3;
+			goto error2;
 	}
 	ib_set_client_data(device, &cm_client, cm_dev);
@@ -3230,9 +3212,9 @@ static void cm_add_one(struct ib_device *device)
 	write_unlock_irqrestore(&cm.device_lock, flags);
 	return;
-error3:
-	ib_unregister_mad_agent(port->mad_agent);
 error2:
+	ib_unregister_mad_agent(port->mad_agent);
+error1:
 	port_modify.set_port_cap_mask = 0;
 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
 	while (--i) {
@@ -3240,7 +3222,6 @@ static void cm_add_one(struct ib_device *device)
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
 	}
-error1:
 	kfree(cm_dev);
 }
...
@@ -38,8 +38,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include "core_priv.h"
@@ -57,13 +56,13 @@ static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 /*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
  * There's no real point to using multiple locks or something fancier
  * like an rwsem: we always access both lists, and we're always
  * modifying one list or the other list.  In any case this is not a
  * hot path so there's no point in trying to optimize.
  */
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);
 static int ib_device_check_mandatory(struct ib_device *device)
 {
@@ -221,7 +220,7 @@ int ib_register_device(struct ib_device *device)
 {
 	int ret;
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 	if (strchr(device->name, '%')) {
 		ret = alloc_name(device->name);
@@ -259,7 +258,7 @@ int ib_register_device(struct ib_device *device)
 	}
 out:
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(ib_register_device);
@@ -276,7 +275,7 @@ void ib_unregister_device(struct ib_device *device)
 	struct ib_client_data *context, *tmp;
 	unsigned long flags;
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 	list_for_each_entry_reverse(client, &client_list, list)
 		if (client->remove)
@@ -284,7 +283,7 @@ void ib_unregister_device(struct ib_device *device)
 	list_del(&device->core_list);
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
@@ -312,14 +311,14 @@ int ib_register_client(struct ib_client *client)
 {
 	struct ib_device *device;
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 	list_add_tail(&client->list, &client_list);
 	list_for_each_entry(device, &device_list, core_list)
 		if (client->add && !add_client_context(device, client))
 			client->add(device);
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	return 0;
 }
@@ -339,7 +338,7 @@ void ib_unregister_client(struct ib_client *client)
 	struct ib_device *device;
 	unsigned long flags;
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 	list_for_each_entry(device, &device_list, core_list) {
 		if (client->remove)
@@ -355,7 +354,7 @@ void ib_unregister_client(struct ib_client *client)
 	}
 	list_del(&client->list);
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 }
 EXPORT_SYMBOL(ib_unregister_client);
...
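The hunks above (and the ucm, uverbs and ipoib hunks further down) are the stock 2.6.16-era conversion from a semaphore used as a mutex to the then-new struct mutex API. A minimal sketch of the before/after shape, using made-up names (my_list, my_lock) rather than symbols from this patch:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);		/* was: static DECLARE_MUTEX(my_lock); */

static void my_add(struct list_head *item)
{
	mutex_lock(&my_lock);		/* was: down(&my_lock); */
	list_add_tail(item, &my_list);
	mutex_unlock(&my_lock);		/* was: up(&my_lock); */
}

The mutex form gives the same exclusion as the semaphore it replaces but adds owner tracking and lockdep-style debugging that plain semaphores lack, which is the motivation for this conversion throughout the merge.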
@@ -445,13 +445,7 @@ static int ib_device_uevent(struct class_device *cdev, char **envp,
 		return -ENOMEM;
 	/*
-	 * It might be nice to pass the node GUID with the event, but
-	 * right now the only way to get it is to query the device
-	 * provider, and this can crash during device removal because
-	 * we are will be running after driver removal has started.
-	 * We could add a node_guid field to struct ib_device, or we
-	 * could just let userspace read the node GUID from sysfs when
-	 * devices are added.
+	 * It would be nice to pass the node GUID with the event...
 	 */
 	envp[i] = NULL;
@@ -623,21 +617,15 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
-	struct ib_device_attr attr;
-	ssize_t ret;
 	if (!ibdev_is_alive(dev))
 		return -ENODEV;
-	ret = ib_query_device(dev, &attr);
-	if (ret)
-		return ret;
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
 }
 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
...
@@ -42,6 +42,7 @@
 #include <linux/mount.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
@@ -113,7 +114,7 @@ static struct ib_client ucm_client = {
 	.remove = ib_ucm_remove_one
 };
-static DECLARE_MUTEX(ctx_id_mutex);
+static DEFINE_MUTEX(ctx_id_mutex);
 static DEFINE_IDR(ctx_id_table);
 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
@@ -121,7 +122,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
 	struct ib_ucm_context *ctx;
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	ctx = idr_find(&ctx_id_table, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
@@ -129,7 +130,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 		ctx = ERR_PTR(-EINVAL);
 	else
 		atomic_inc(&ctx->ref);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 	return ctx;
 }
@@ -186,9 +187,9 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 		if (!result)
 			goto error;
-		down(&ctx_id_mutex);
+		mutex_lock(&ctx_id_mutex);
 		result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
-		up(&ctx_id_mutex);
+		mutex_unlock(&ctx_id_mutex);
 	} while (result == -EAGAIN);
 	if (result)
@@ -550,9 +551,9 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
 err2:
 	ib_destroy_cm_id(ctx->cm_id);
 err1:
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	idr_remove(&ctx_id_table, ctx->id);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 	kfree(ctx);
 	return result;
 }
@@ -572,7 +573,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	ctx = idr_find(&ctx_id_table, cmd.id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
@@ -580,7 +581,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 		ctx = ERR_PTR(-EINVAL);
 	else
 		idr_remove(&ctx_id_table, ctx->id);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1280,9 +1281,9 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
 				       struct ib_ucm_context, file_list);
 		up(&file->mutex);
-		down(&ctx_id_mutex);
+		mutex_lock(&ctx_id_mutex);
 		idr_remove(&ctx_id_table, ctx->id);
-		up(&ctx_id_mutex);
+		mutex_unlock(&ctx_id_mutex);
 		ib_destroy_cm_id(ctx->cm_id);
 		ib_ucm_cleanup_events(ctx);
...
@@ -41,6 +41,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -88,7 +89,7 @@ struct ib_uverbs_event_file {
 struct ib_uverbs_file {
 	struct kref ref;
-	struct semaphore mutex;
+	struct mutex mutex;
 	struct ib_uverbs_device *device;
 	struct ib_ucontext *ucontext;
 	struct ib_event_handler event_handler;
@@ -131,7 +132,7 @@ struct ib_ucq_object {
 	u32 async_events_reported;
 };
-extern struct semaphore ib_uverbs_idr_mutex;
+extern struct mutex ib_uverbs_idr_mutex;
 extern struct idr ib_uverbs_pd_idr;
 extern struct idr ib_uverbs_mr_idr;
 extern struct idr ib_uverbs_mw_idr;
...
This diff is collapsed.
@@ -66,7 +66,7 @@ enum {
 static struct class *uverbs_class;
-DECLARE_MUTEX(ib_uverbs_idr_mutex);
+DEFINE_MUTEX(ib_uverbs_idr_mutex);
 DEFINE_IDR(ib_uverbs_pd_idr);
 DEFINE_IDR(ib_uverbs_mr_idr);
 DEFINE_IDR(ib_uverbs_mw_idr);
@@ -180,7 +180,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 	if (!context)
 		return 0;
-	down(&ib_uverbs_idr_mutex);
+	mutex_lock(&ib_uverbs_idr_mutex);
 	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
 		struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
@@ -250,7 +250,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uobj);
 	}
-	up(&ib_uverbs_idr_mutex);
+	mutex_unlock(&ib_uverbs_idr_mutex);
 	return context->device->dealloc_ucontext(context);
 }
@@ -653,7 +653,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 	file->ucontext = NULL;
 	file->async_file = NULL;
 	kref_init(&file->ref);
-	init_MUTEX(&file->mutex);
+	mutex_init(&file->mutex);
 	filp->private_data = file;
...
@@ -163,6 +163,11 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
 	return 0;
 }
+int mthca_ah_grh_present(struct mthca_ah *ah)
+{
+	return !!(ah->av->g_slid & 0x80);
+}
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header)
 {
@@ -172,8 +177,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 	header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
 	header->lrh.destination_lid = ah->av->dlid;
 	header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
-	if (ah->av->g_slid & 0x80) {
-		header->grh_present = 1;
+	if (mthca_ah_grh_present(ah)) {
 		header->grh.traffic_class =
 			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
 		header->grh.flow_label =
@@ -184,8 +188,6 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 				  &header->grh.source_gid);
 		memcpy(header->grh.destination_gid.raw,
 		       ah->av->dgid, 16);
-	} else {
-		header->grh_present = 0;
 	}
 	return 0;
...
@@ -606,7 +606,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 			err = -EINVAL;
 			goto out;
 		}
-		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
+		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
 			if (virt != -1) {
 				pages[nent * 2] = cpu_to_be64(virt);
 				virt += 1 << lg;
@@ -727,8 +727,8 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 	 * system pages needed.
 	 */
 	dev->fw.arbel.fw_pages =
-		(dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >>
+		ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
 		(PAGE_SHIFT - 12);
 	mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
 		  (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1445,6 +1445,7 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
 	 * pages needed.
 	 */
 	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
 	return 0;
 }
...
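For reference, the arithmetic in the QUERY_FW hunk: the firmware reports page counts in 4 KB units, and both the old open-coded expression and the new ALIGN() form round that count up to whole host pages. A hedged sketch of the conversion (ALIGN() is the kernel macro from <linux/kernel.h>; the helper name is made up, not from the patch):

/*
 * With 16 KB host pages (PAGE_SHIFT == 14): PAGE_SIZE >> 12 == 4, so
 * 5 firmware pages -> ALIGN(5, 4) == 8 -> 8 >> (14 - 12) == 2 host pages,
 * the same ceiling the old "(x + (1 << (PAGE_SHIFT - 12)) - 1) >>
 * (PAGE_SHIFT - 12)" expression produced.
 */
static inline u64 fw_pages_to_sys_pages(u64 fw_pages_4k)
{
	return ALIGN(fw_pages_4k, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
}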
@@ -520,6 +520,7 @@ int mthca_create_ah(struct mthca_dev *dev,
 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header);
+int mthca_ah_grh_present(struct mthca_ah *ah);
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
...
@@ -45,6 +45,7 @@
 enum {
 	MTHCA_NUM_ASYNC_EQE = 0x80,
 	MTHCA_NUM_CMD_EQE = 0x80,
+	MTHCA_NUM_SPARE_EQE = 0x80,
 	MTHCA_EQ_ENTRY_SIZE = 0x20
 };
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 {
 	struct mthca_eqe *eqe;
 	int disarm_cqn;
 	int eqes_found = 0;
+	int set_ci = 0;
 	while ((eqe = next_eqe_sw(eq))) {
-		int set_ci = 0;
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				  be16_to_cpu(eqe->event.cmd.token),
 				  eqe->event.cmd.status,
 				  be64_to_cpu(eqe->event.cmd.out_param));
-			/*
-			 * cmd_event() may add more commands.
-			 * The card will think the queue has overflowed if
-			 * we don't tell it we've been processing events.
-			 */
-			set_ci = 1;
 			break;
 		case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		set_eqe_hw(eqe);
 		++eq->cons_index;
 		eqes_found = 1;
-		if (unlikely(set_ci)) {
+		++set_ci;
+		/*
+		 * The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events.  We
+		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
 			/*
 			 * Conditional on hca_type is OK here because
 			 * this is a rare case, not the fast path.
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
 		128 : dev->eq_table.inta_pin;
-	err = mthca_create_eq(dev, dev->limits.num_cqs,
+	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
 	if (err)
 		goto err_out_unmap;
-	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
 	if (err)
 		goto err_out_comp;
-	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
 	if (err)
...
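The event-queue change above sizes each EQ with MTHCA_NUM_SPARE_EQE extra entries and rings the consumer-index doorbell at least that often, instead of only after command-interface events. A condensed restatement of the loop from the hunks above (a sketch, not compilable on its own; set_eq_ci() stands in for the hardware-specific doorbell write used in this file):

	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/* ... dispatch the event ... */
		set_eqe_hw(eqe);
		++eq->cons_index;
		++set_ci;

		/*
		 * The EQ was created MTHCA_NUM_SPARE_EQE entries larger
		 * than it strictly needs to be, so writing the consumer
		 * index at least every MTHCA_NUM_SPARE_EQE events keeps
		 * the HCA from ever seeing the queue as overflowed.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}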
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
+ * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
  */
 #include <rdma/ib_smi.h>
@@ -45,6 +45,14 @@
 #include "mthca_user.h"
 #include "mthca_memfree.h"
+static void init_query_mad(struct ib_smp *mad)
+{
+	mad->base_version = 1;
+	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+	mad->class_version = 1;
+	mad->method = IB_MGMT_METHOD_GET;
+}
 static int mthca_query_device(struct ib_device *ibdev,
 			      struct ib_device_attr *props)
 {
@@ -55,7 +63,7 @@ static int mthca_query_device(struct ib_device *ibdev,
 	u8 status;
-	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
@@ -64,12 +72,8 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->fw_ver = mdev->fw_ver;
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version = 1;
-	in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version = 1;
-	in_mad->method = IB_MGMT_METHOD_GET;
-	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 	err = mthca_MAD_IFC(mdev, 1, 1,
 			    1, NULL, NULL, in_mad, out_mad,
@@ -87,7 +91,6 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
 	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
-	memcpy(&props->node_guid, out_mad->data + 12, 8);
 	props->max_mr_size = ~0ull;
 	props->page_size_cap = mdev->limits.page_size_cap;
@@ -128,20 +131,16 @@ static int mthca_query_port(struct ib_device *ibdev,
 	int err = -ENOMEM;
 	u8 status;
-	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
 	memset(props, 0, sizeof *props);
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version = 1;
-	in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version = 1;
-	in_mad->method = IB_MGMT_METHOD_GET;
-	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -220,18 +219,14 @@ static int mthca_query_pkey(struct ib_device *ibdev,
 	int err = -ENOMEM;
 	u8 status;
-	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version = 1;
-	in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version = 1;
-	in_mad->method = IB_MGMT_METHOD_GET;
-	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
-	in_mad->attr_mod = cpu_to_be32(index / 32);
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+	in_mad->attr_mod = cpu_to_be32(index / 32);
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -259,18 +254,14 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	int err = -ENOMEM;
 	u8 status;
-	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version = 1;
-	in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version = 1;
-	in_mad->method = IB_MGMT_METHOD_GET;
-	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -284,13 +275,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	memcpy(gid->raw, out_mad->data + 8, 8);
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version = 1;
-	in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version = 1;
-	in_mad->method = IB_MGMT_METHOD_GET;
-	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
-	in_mad->attr_mod = cpu_to_be32(index / 8);
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+	in_mad->attr_mod = cpu_to_be32(index / 8);
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -458,8 +445,10 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
 	if (pd->uobject) {
 		context = to_mucontext(pd->uobject->context);
-		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-			return ERR_PTR(-EFAULT);
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+			err = -EFAULT;
+			goto err_free;
+		}
 		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 					context->db_tab, ucmd.db_index,
@@ -535,8 +524,10 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 		if (pd->uobject) {
 			context = to_mucontext(pd->uobject->context);
-			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+				kfree(qp);
 				return ERR_PTR(-EFAULT);
+			}
 			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 						context->db_tab,
@@ -783,24 +774,20 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
 	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
 		return ERR_PTR(-EINVAL);
-	if (num_phys_buf > 1 &&
-	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
-		return ERR_PTR(-EINVAL);
 	mask = 0;
 	total_size = 0;
 	for (i = 0; i < num_phys_buf; ++i) {
-		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
-			return ERR_PTR(-EINVAL);
-		if (i != 0 && i != num_phys_buf - 1 &&
-		    (buffer_list[i].size & ~PAGE_MASK))
-			return ERR_PTR(-EINVAL);
+		if (i != 0)
+			mask |= buffer_list[i].addr;
+		if (i != num_phys_buf - 1)
+			mask |= buffer_list[i].addr + buffer_list[i].size;
 		total_size += buffer_list[i].size;
-		if (i > 0)
-			mask |= buffer_list[i].addr;
 	}
+	if (mask & ~PAGE_MASK)
+		return ERR_PTR(-EINVAL);
 	/* Find largest page shift we can use to cover buffers */
 	for (shift = PAGE_SHIFT; shift < 31; ++shift)
 		if (num_phys_buf > 1) {
@@ -1070,11 +1057,48 @@ static struct class_device_attribute *mthca_class_attributes[] = {
 	&class_device_attr_board_id
 };
+static int mthca_init_node_data(struct mthca_dev *dev)
+{
+	struct ib_smp *in_mad = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+	u8 status;
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+	err = mthca_MAD_IFC(dev, 1, 1,
+			    1, NULL, NULL, in_mad, out_mad,
+			    &status);
+	if (err)
+		goto out;
+	if (status) {
+		err = -EINVAL;
+		goto out;
+	}
+	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
 int mthca_register_device(struct mthca_dev *dev)
 {
 	int ret;
 	int i;
+	ret = mthca_init_node_data(dev);
+	if (ret)
+		return ret;
 	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner = THIS_MODULE;
...
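The reg_phys_mr hunk above replaces the per-buffer alignment checks with one accumulated mask: OR together the start of every buffer except the first and the end of every buffer except the last, then test the low bits once. A small standalone sketch of that check (hypothetical struct and values, not the driver's types):

#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SHIFT 12
#define MY_PAGE_MASK  (~((1ULL << MY_PAGE_SHIFT) - 1))

struct phys_buf { uint64_t addr, size; };

static int buffers_page_aligned(const struct phys_buf *buf, int n)
{
	uint64_t mask = 0;
	int i;

	for (i = 0; i < n; ++i) {
		if (i != 0)		/* start of every buffer but the first */
			mask |= buf[i].addr;
		if (i != n - 1)		/* end of every buffer but the last */
			mask |= buf[i].addr + buf[i].size;
	}
	return !(mask & ~MY_PAGE_MASK);	/* any low bit set => misaligned */
}

int main(void)
{
	struct phys_buf ok[]  = { { 0x10000, 0x2000 }, { 0x20000, 0x1000 } };
	struct phys_buf bad[] = { { 0x10000, 0x1800 }, { 0x11800, 0x1000 } };

	printf("%d %d\n", buffers_page_aligned(ok, 2), buffers_page_aligned(bad, 2));
	return 0;
}

The first buffer's start and the last buffer's end are deliberately left out of the mask because the registered region is allowed to begin and end in the middle of a page.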
@@ -1434,7 +1434,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 	u16 pkey;
 	ib_ud_header_init(256, /* assume a MAD */
-			  sqp->ud_header.grh_present,
+			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
 			  &sqp->ud_header);
 	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
...
@@ -45,11 +45,11 @@
 #include <linux/config.h>
 #include <linux/kref.h>
 #include <linux/if_infiniband.h>
+#include <linux/mutex.h>
 #include <net/neighbour.h>
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
@@ -123,8 +123,8 @@ struct ipoib_dev_priv {
 	unsigned long flags;
-	struct semaphore mcast_mutex;
-	struct semaphore vlan_mutex;
+	struct mutex mcast_mutex;
+	struct mutex vlan_mutex;
 	struct rb_root path_tree;
 	struct list_head path_list;
...
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(data_debug_level,
 #define IPOIB_OP_RECV (1ul << 31)
-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 				 struct ib_pd *pd, struct ib_ah_attr *attr)
@@ -445,25 +445,16 @@ int ipoib_ib_dev_down(struct net_device *dev)
 	/* Shutdown the P_Key thread if still active */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
 		cancel_delayed_work(&priv->pkey_task);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		flush_workqueue(ipoib_workqueue);
 	}
 	ipoib_mcast_stop_thread(dev, 1);
-	/*
-	 * Flush the multicast groups first so we stop any multicast joins. The
-	 * completion thread may have already died and we may deadlock waiting
-	 * for the completion thread to finish some multicast joins.
-	 */
 	ipoib_mcast_dev_flush(dev);
-	/* Delete broadcast and local addresses since they will be recreated */
-	ipoib_mcast_dev_down(dev);
 	ipoib_flush_paths(dev);
 	return 0;
@@ -608,13 +599,13 @@ void ipoib_ib_dev_flush(void *_dev)
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);
-	down(&priv->vlan_mutex);
+	mutex_lock(&priv->vlan_mutex);
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);
-	up(&priv->vlan_mutex);
+	mutex_unlock(&priv->vlan_mutex);
 }
 void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -624,9 +615,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 	ipoib_mcast_stop_thread(dev, 1);
-	/* Delete the broadcast address and the local address */
-	ipoib_mcast_dev_down(dev);
+	ipoib_mcast_dev_flush(dev);
 	ipoib_transport_dev_cleanup(dev);
 }
@@ -662,12 +651,12 @@ void ipoib_pkey_poll(void *dev_ptr)
 	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 		ipoib_open(dev);
 	else {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->pkey_task,
 					   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 	}
 }
@@ -681,12 +670,12 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev)
 	/* P_Key value not assigned yet - start polling */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->pkey_task,
 				   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		return 1;
 	}
...
@@ -105,7 +105,7 @@ int ipoib_open(struct net_device *dev)
 		struct ipoib_dev_priv *cpriv;
 		/* Bring up any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
@@ -115,7 +115,7 @@ int ipoib_open(struct net_device *dev)
 			dev_change_flags(cpriv->dev, flags | IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 	netif_start_queue(dev);
@@ -140,7 +140,7 @@ static int ipoib_stop(struct net_device *dev)
 		struct ipoib_dev_priv *cpriv;
 		/* Bring down any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
@@ -150,7 +150,7 @@ static int ipoib_stop(struct net_device *dev)
 			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 	return 0;
@@ -892,8 +892,8 @@ static void ipoib_setup(struct net_device *dev)
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
-	init_MUTEX(&priv->mcast_mutex);
-	init_MUTEX(&priv->vlan_mutex);
+	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->vlan_mutex);
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
...
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(mcast_debug_level,
 		 "Enable multicast debug tracing if > 0");
 #endif
-static DECLARE_MUTEX(mcast_mutex);
+static DEFINE_MUTEX(mcast_mutex);
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
@@ -97,8 +97,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
 	unsigned long flags;
-	LIST_HEAD(ah_list);
-	struct ipoib_ah *ah, *tah;
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
@@ -107,8 +105,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	spin_lock_irqsave(&priv->lock, flags);
 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
+		/*
+		 * It's safe to call ipoib_put_ah() inside priv->lock
+		 * here, because we know that mcast->ah will always
+		 * hold one more reference, so ipoib_put_ah() will
+		 * never do more than decrement the ref count.
+		 */
 		if (neigh->ah)
-			list_add_tail(&neigh->ah->list, &ah_list);
+			ipoib_put_ah(neigh->ah);
 		*to_ipoib_neigh(neigh->neighbour) = NULL;
 		neigh->neighbour->ops->destructor = NULL;
 		kfree(neigh);
@@ -116,9 +120,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	spin_unlock_irqrestore(&priv->lock, flags);
-	list_for_each_entry_safe(ah, tah, &ah_list, list)
-		ipoib_put_ah(ah);
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
@@ -384,10 +385,10 @@ static void ipoib_mcast_join_complete(int status,
 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
 		mcast->backoff = 1;
-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 		complete(&mcast->done);
 		return;
 	}
@@ -417,7 +418,7 @@ static void ipoib_mcast_join_complete(int status,
 	mcast->query = NULL;
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
 		if (status == -ETIMEDOUT)
 			queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -426,7 +427,7 @@ static void ipoib_mcast_join_complete(int status,
 					   mcast->backoff * HZ);
 	} else
 		complete(&mcast->done);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 	return;
 }
@@ -481,12 +482,12 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
 			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->mcast_task,
 					   mcast->backoff * HZ);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
 }
@@ -519,11 +520,11 @@ void ipoib_mcast_join_task(void *dev_ptr)
 		priv->broadcast = ipoib_mcast_alloc(dev, 1);
 		if (!priv->broadcast) {
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
-			down(&mcast_mutex);
+			mutex_lock(&mcast_mutex);
 			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 				queue_delayed_work(ipoib_workqueue,
 						   &priv->mcast_task, HZ);
-			up(&mcast_mutex);
+			mutex_unlock(&mcast_mutex);
 			return;
 		}
@@ -579,10 +580,10 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 	ipoib_dbg_mcast(priv, "starting multicast thread\n");
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
 		queue_work(ipoib_workqueue, &priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 	return 0;
 }
@@ -594,10 +595,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
 	cancel_delayed_work(&priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 	if (flush)
 		flush_workqueue(ipoib_workqueue);
@@ -741,48 +742,23 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	LIST_HEAD(remove_list);
-	struct ipoib_mcast *mcast, *tmcast, *nmcast;
+	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;
 	ipoib_dbg_mcast(priv, "flushing multicast list\n");
 	spin_lock_irqsave(&priv->lock, flags);
 	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->flags =
-				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);
-			nmcast->mcmember.mgid = mcast->mcmember.mgid;
-			/* Add the new group in before the to-be-destroyed group */
-			list_add_tail(&nmcast->list, &mcast->list);
-			list_del_init(&mcast->list);
-			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
-					&priv->multicast_tree);
-			list_add_tail(&mcast->list, &remove_list);
-		} else {
-			ipoib_warn(priv, "could not reallocate multicast group "
-				   IPOIB_GID_FMT "\n",
-				   IPOIB_GID_ARG(mcast->mcmember.mgid));
-		}
+		list_del(&mcast->list);
+		rb_erase(&mcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&mcast->list, &remove_list);
 	}
 	if (priv->broadcast) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;
-			rb_replace_node(&priv->broadcast->rb_node,
-					&nmcast->rb_node,
-					&priv->multicast_tree);
-			list_add_tail(&priv->broadcast->list, &remove_list);
-		}
-		priv->broadcast = nmcast;
+		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&priv->broadcast->list, &remove_list);
+		priv->broadcast = NULL;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -793,24 +769,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 }
-void ipoib_mcast_dev_down(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-	/* Delete broadcast since it will be recreated */
-	if (priv->broadcast) {
-		ipoib_dbg_mcast(priv, "deleting broadcast group\n");
-		spin_lock_irqsave(&priv->lock, flags);
-		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		ipoib_mcast_leave(dev, priv->broadcast);
-		ipoib_mcast_free(priv->broadcast);
-		priv->broadcast = NULL;
-	}
-}
 void ipoib_mcast_restart_task(void *dev_ptr)
 {
 	struct net_device *dev = dev_ptr;
@@ -824,7 +782,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 	ipoib_mcast_stop_thread(dev, 0);
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&dev->xmit_lock, flags);
+	spin_lock(&priv->lock);
 	/*
 	 * Unfortunately, the networking core only gives us a list of all of
@@ -896,7 +855,9 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 				list_add_tail(&mcast->list, &remove_list);
 			}
 		}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&dev->xmit_lock, flags);
 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
...
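The restart-task hunks also change the locking so the walk of the netdev's multicast list is done under the device's own lock. The nesting they establish, in outline (same field names as the hunks above; the loop body is elided):

	spin_lock_irqsave(&dev->xmit_lock, flags);	/* freezes dev->mc_list */
	spin_lock(&priv->lock);				/* protects IPoIB's multicast rbtree/list */
	/* ... walk dev->mc_list and reconcile priv->multicast_tree ... */
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&dev->xmit_lock, flags);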
@@ -65,9 +65,9 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	}
 	/* attach QP to multicast group */
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_attach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
@@ -81,9 +81,9 @@ int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_detach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
...
@@ -63,7 +63,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	ppriv = netdev_priv(pdev);
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 	/*
 	 * First ensure this isn't a duplicate. We check the parent device and
@@ -124,7 +124,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	list_add_tail(&priv->list, &ppriv->child_intfs);
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return 0;
@@ -139,7 +139,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	free_netdev(priv->dev);
 err:
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return result;
 }
@@ -153,7 +153,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	ppriv = netdev_priv(pdev);
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
@@ -167,7 +167,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 			break;
 		}
 	}
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return ret;
 }
...
@@ -1516,8 +1516,7 @@ static ssize_t show_port(struct class_device *class_dev, char *buf)
 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
-static struct srp_host *srp_add_port(struct ib_device *device,
-				     __be64 node_guid, u8 port)
+static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 {
 	struct srp_host *host;
@@ -1532,7 +1531,7 @@ static struct srp_host *srp_add_port(struct ib_device *device,
 	host->port = port;
 	host->initiator_port_id[7] = port;
-	memcpy(host->initiator_port_id + 8, &node_guid, 8);
+	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
 	host->pd = ib_alloc_pd(device);
 	if (IS_ERR(host->pd))
@@ -1580,22 +1579,11 @@ static void srp_add_one(struct ib_device *device)
 {
 	struct list_head *dev_list;
 	struct srp_host *host;
-	struct ib_device_attr *dev_attr;
 	int s, e, p;
-	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
-	if (!dev_attr)
-		return;
-	if (ib_query_device(device, dev_attr)) {
-		printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
-		       device->name);
-		goto out;
-	}
 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
 	if (!dev_list)
-		goto out;
+		return;
 	INIT_LIST_HEAD(dev_list);
@@ -1608,15 +1596,12 @@ static void srp_add_one(struct ib_device *device)
 	}
 	for (p = s; p <= e; ++p) {
-		host = srp_add_port(device, dev_attr->node_guid, p);
+		host = srp_add_port(device, p);
 		if (host)
 			list_add_tail(&host->list, dev_list);
 	}
 	ib_set_client_data(device, &srp_client, dev_list);
-out:
-	kfree(dev_attr);
 }
 static void srp_remove_one(struct ib_device *device)
...
@@ -88,7 +88,6 @@ enum ib_atomic_cap {
 struct ib_device_attr {
 	u64 fw_ver;
-	__be64 node_guid;
 	__be64 sys_image_guid;
 	u64 max_mr_size;
 	u64 page_size_cap;
@@ -951,6 +950,7 @@ struct ib_device {
 	u64 uverbs_cmd_mask;
 	int uverbs_abi_ver;
+	__be64 node_guid;
 	u8 node_type;
 	u8 phys_port_cnt;
 };
...
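The net effect of the ib_verbs.h change (together with the cm, sysfs and srp hunks above) is that node_guid moves from the ib_query_device() result into struct ib_device itself, so consumers can read it directly. Illustrative only; get_node_guid() is a made-up helper, not part of the patch:

static __be64 get_node_guid(struct ib_device *device)
{
	/*
	 * Cached at registration time by the low-level driver (see
	 * mthca_init_node_data() above), so reading it cannot fail or
	 * sleep the way an ib_query_device() round trip could.
	 */
	return device->node_guid;
}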