Kirill Smelkov / linux / Commits

Commit fb7ffeb1, authored Jan 13, 2006 by Linus Torvalds

    Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

Parents: 69eebed2 95ed644f

Showing 21 changed files with 287 additions and 352 deletions (+287 / -352)

drivers/infiniband/core/cm.c                     +5   -24
drivers/infiniband/core/device.c                 +11  -12
drivers/infiniband/core/sysfs.c                  +5   -17
drivers/infiniband/core/ucm.c                    +12  -11
drivers/infiniband/core/uverbs.h                 +3   -2
drivers/infiniband/core/uverbs_cmd.c             +76  -76
drivers/infiniband/core/uverbs_main.c            +4   -4
drivers/infiniband/hw/mthca/mthca_av.c           +6   -4
drivers/infiniband/hw/mthca/mthca_cmd.c          +4   -3
drivers/infiniband/hw/mthca/mthca_dev.h          +1   -0
drivers/infiniband/hw/mthca/mthca_eq.c           +15  -13
drivers/infiniband/hw/mthca/mthca_provider.c     +78  -54
drivers/infiniband/hw/mthca/mthca_qp.c           +1   -1
drivers/infiniband/ulp/ipoib/ipoib.h             +3   -3
drivers/infiniband/ulp/ipoib/ipoib_ib.c          +10  -21
drivers/infiniband/ulp/ipoib/ipoib_main.c        +6   -6
drivers/infiniband/ulp/ipoib/ipoib_multicast.c   +33  -72
drivers/infiniband/ulp/ipoib/ipoib_verbs.c       +4   -4
drivers/infiniband/ulp/ipoib/ipoib_vlan.c        +5   -5
drivers/infiniband/ulp/srp/ib_srp.c              +4   -19
include/rdma/ib_verbs.h                          +1   -1

drivers/infiniband/core/cm.c

@@ -3163,22 +3163,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
-	struct ib_device_attr *device_attr;
-	__be64 guid;
-	int ret;
-
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr)
-		return 0;
-
-	ret = ib_query_device(device, device_attr);
-	guid = ret ? 0 : device_attr->node_guid;
-	kfree(device_attr);
-	return guid;
-}
-
 static void cm_add_one(struct ib_device *device)
 {
 	struct cm_device *cm_dev;
@@ -3200,9 +3184,7 @@ static void cm_add_one(struct ib_device *device)
 		return;
 
 	cm_dev->device = device;
-	cm_dev->ca_guid = cm_get_ca_guid(device);
-	if (!cm_dev->ca_guid)
-		goto error1;
+	cm_dev->ca_guid = device->node_guid;
 
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= device->phys_port_cnt; i++) {
@@ -3217,11 +3199,11 @@ static void cm_add_one(struct ib_device *device)
 						cm_recv_handler,
 						port);
 		if (IS_ERR(port->mad_agent))
-			goto error2;
+			goto error1;
 
 		ret = ib_modify_port(device, i, 0, &port_modify);
 		if (ret)
-			goto error3;
+			goto error2;
 	}
 	ib_set_client_data(device, &cm_client, cm_dev);
@@ -3230,9 +3212,9 @@ static void cm_add_one(struct ib_device *device)
 	write_unlock_irqrestore(&cm.device_lock, flags);
 	return;
 
-error3:
+error2:
 	ib_unregister_mad_agent(port->mad_agent);
-error2:
+error1:
 	port_modify.set_port_cap_mask = 0;
 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
 	while (--i) {
@@ -3240,7 +3222,6 @@ static void cm_add_one(struct ib_device *device)
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
 	}
-error1:
 	kfree(cm_dev);
 }

drivers/infiniband/core/device.c

@@ -38,8 +38,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #include "core_priv.h"
@@ -57,13 +56,13 @@ static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
 /*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
  * There's no real point to using multiple locks or something fancier
  * like an rwsem: we always access both lists, and we're always
  * modifying one list or the other list.  In any case this is not a
  * hot path so there's no point in trying to optimize.
  */
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {
@@ -221,7 +220,7 @@ int ib_register_device(struct ib_device *device)
 {
 	int ret;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	if (strchr(device->name, '%')) {
 		ret = alloc_name(device->name);
@@ -259,7 +258,7 @@ int ib_register_device(struct ib_device *device)
 	}
 
  out:
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(ib_register_device);
@@ -276,7 +275,7 @@ void ib_unregister_device(struct ib_device *device)
 	struct ib_client_data *context, *tmp;
 	unsigned long flags;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_for_each_entry_reverse(client, &client_list, list)
 		if (client->remove)
@@ -284,7 +283,7 @@ void ib_unregister_device(struct ib_device *device)
 
 	list_del(&device->core_list);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
@@ -312,14 +311,14 @@ int ib_register_client(struct ib_client *client)
 {
 	struct ib_device *device;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_add_tail(&client->list, &client_list);
 	list_for_each_entry(device, &device_list, core_list)
 		if (client->add && !add_client_context(device, client))
 			client->add(device);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 
 	return 0;
 }
@@ -339,7 +338,7 @@ void ib_unregister_client(struct ib_client *client)
 	struct ib_device *device;
 	unsigned long flags;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_for_each_entry(device, &device_list, core_list) {
 		if (client->remove)
@@ -355,7 +354,7 @@ void ib_unregister_client(struct ib_client *client)
 	}
 	list_del(&client->list);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 }
 EXPORT_SYMBOL(ib_unregister_client);

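Aside (illustrative, not part of the commit): the device.c hunks above are the template for the semaphore-to-mutex conversion repeated below in ucm.c, the uverbs files, and the ipoib files. A minimal sketch of the pattern, with made-up names, assuming the lock is only ever used for mutual exclusion:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(example_list);          /* hypothetical list protected by the lock */
static DEFINE_MUTEX(example_mutex);      /* was: static DECLARE_MUTEX(example_sem); */

static void example_add(struct list_head *item)
{
	mutex_lock(&example_mutex);      /* was: down(&example_sem); */
	list_add_tail(item, &example_list);
	mutex_unlock(&example_mutex);    /* was: up(&example_sem);   */
}
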
drivers/infiniband/core/sysfs.c

@@ -445,13 +445,7 @@ static int ib_device_uevent(struct class_device *cdev, char **envp,
 		return -ENOMEM;
 
 	/*
-	 * It might be nice to pass the node GUID with the event, but
-	 * right now the only way to get it is to query the device
-	 * provider, and this can crash during device removal because
-	 * we are will be running after driver removal has started.
-	 * We could add a node_guid field to struct ib_device, or we
-	 * could just let userspace read the node GUID from sysfs when
-	 * devices are added.
+	 * It would be nice to pass the node GUID with the event...
 	 */
 
 	envp[i] = NULL;
@@ -623,21 +617,15 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
-	struct ib_device_attr attr;
-	ssize_t ret;
 
 	if (!ibdev_is_alive(dev))
 		return -ENODEV;
 
-	ret = ib_query_device(dev, &attr);
-	if (ret)
-		return ret;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
 }
 
 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);

drivers/infiniband/core/ucm.c

@@ -42,6 +42,7 @@
 #include <linux/mount.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
@@ -113,7 +114,7 @@ static struct ib_client ucm_client = {
 	.remove = ib_ucm_remove_one
 };
 
-static DECLARE_MUTEX(ctx_id_mutex);
+static DEFINE_MUTEX(ctx_id_mutex);
 static DEFINE_IDR(ctx_id_table);
 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
@@ -121,7 +122,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
 	struct ib_ucm_context *ctx;
 
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	ctx = idr_find(&ctx_id_table, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
@@ -129,7 +130,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 		ctx = ERR_PTR(-EINVAL);
 	else
 		atomic_inc(&ctx->ref);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 
 	return ctx;
 }
@@ -186,9 +187,9 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 		if (!result)
 			goto error;
 
-		down(&ctx_id_mutex);
+		mutex_lock(&ctx_id_mutex);
 		result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
-		up(&ctx_id_mutex);
+		mutex_unlock(&ctx_id_mutex);
 	} while (result == -EAGAIN);
 
 	if (result)
@@ -550,9 +551,9 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
 err2:
 	ib_destroy_cm_id(ctx->cm_id);
 err1:
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	idr_remove(&ctx_id_table, ctx->id);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 	kfree(ctx);
 	return result;
 }
@@ -572,7 +573,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	down(&ctx_id_mutex);
+	mutex_lock(&ctx_id_mutex);
 	ctx = idr_find(&ctx_id_table, cmd.id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
@@ -580,7 +581,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 		ctx = ERR_PTR(-EINVAL);
 	else
 		idr_remove(&ctx_id_table, ctx->id);
-	up(&ctx_id_mutex);
+	mutex_unlock(&ctx_id_mutex);
 
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1280,9 +1281,9 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
 				       struct ib_ucm_context, file_list);
 		up(&file->mutex);
 
-		down(&ctx_id_mutex);
+		mutex_lock(&ctx_id_mutex);
 		idr_remove(&ctx_id_table, ctx->id);
-		up(&ctx_id_mutex);
+		mutex_unlock(&ctx_id_mutex);
 
 		ib_destroy_cm_id(ctx->cm_id);
 		ib_ucm_cleanup_events(ctx);

drivers/infiniband/core/uverbs.h

@@ -41,6 +41,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -88,7 +89,7 @@ struct ib_uverbs_event_file {
 struct ib_uverbs_file {
 	struct kref				ref;
-	struct semaphore			mutex;
+	struct mutex				mutex;
 	struct ib_uverbs_device		       *device;
 	struct ib_ucontext		       *ucontext;
 	struct ib_event_handler			event_handler;
@@ -131,7 +132,7 @@ struct ib_ucq_object {
 	u32			async_events_reported;
 };
 
-extern struct semaphore ib_uverbs_idr_mutex;
+extern struct mutex ib_uverbs_idr_mutex;
 extern struct idr ib_uverbs_pd_idr;
 extern struct idr ib_uverbs_mr_idr;
 extern struct idr ib_uverbs_mw_idr;

drivers/infiniband/core/uverbs_cmd.c (+76 -76; diff collapsed on the original page, not shown)

drivers/infiniband/core/uverbs_main.c

@@ -66,7 +66,7 @@ enum {
 static struct class *uverbs_class;
 
-DECLARE_MUTEX(ib_uverbs_idr_mutex);
+DEFINE_MUTEX(ib_uverbs_idr_mutex);
 DEFINE_IDR(ib_uverbs_pd_idr);
 DEFINE_IDR(ib_uverbs_mr_idr);
 DEFINE_IDR(ib_uverbs_mw_idr);
@@ -180,7 +180,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 	if (!context)
 		return 0;
 
-	down(&ib_uverbs_idr_mutex);
+	mutex_lock(&ib_uverbs_idr_mutex);
 
 	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
 		struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
@@ -250,7 +250,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uobj);
 	}
 
-	up(&ib_uverbs_idr_mutex);
+	mutex_unlock(&ib_uverbs_idr_mutex);
 
 	return context->device->dealloc_ucontext(context);
 }
@@ -653,7 +653,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 	file->ucontext	 = NULL;
 	file->async_file = NULL;
 	kref_init(&file->ref);
-	init_MUTEX(&file->mutex);
+	mutex_init(&file->mutex);
 
 	filp->private_data = file;

drivers/infiniband/hw/mthca/mthca_av.c

@@ -163,6 +163,11 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
 	return 0;
 }
 
+int mthca_ah_grh_present(struct mthca_ah *ah)
+{
+	return !!(ah->av->g_slid & 0x80);
+}
+
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header)
 {
@@ -172,8 +177,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 	header->lrh.service_level   = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
 	header->lrh.destination_lid = ah->av->dlid;
 	header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
-	if (ah->av->g_slid & 0x80) {
-		header->grh_present = 1;
+	if (mthca_ah_grh_present(ah)) {
 		header->grh.traffic_class =
 			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
 		header->grh.flow_label    =
@@ -184,8 +188,6 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 			  &header->grh.source_gid);
 		memcpy(header->grh.destination_gid.raw,
 		       ah->av->dgid, 16);
-	} else {
-		header->grh_present = 0;
 	}
 
 	return 0;

drivers/infiniband/hw/mthca/mthca_cmd.c

@@ -606,7 +606,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 			err = -EINVAL;
 			goto out;
 		}
-		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
+		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
 			if (virt != -1) {
 				pages[nents * 2] = cpu_to_be64(virt);
 				virt += 1 << lg;
@@ -727,8 +727,8 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 		 * system pages needed.
 		 */
 		dev->fw.arbel.fw_pages =
-			(dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >>
-				(PAGE_SHIFT - 12);
+			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
+				(PAGE_SHIFT - 12);
 
 		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
 			  (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1445,6 +1445,7 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
 	 * pages needed.
 	 */
-	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
 
 	return 0;
 }

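Aside (illustrative, not part of the commit): the two rounding hunks above only swap an open-coded round-up for the ALIGN() macro; the resulting value is unchanged. A standalone check, using a simplified power-of-two ALIGN() and assuming 64K system pages purely for the sake of the example:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* simplified power-of-two version */

int main(void)
{
	unsigned long fw_pages   = 300;	/* hypothetical count of 4K chunks */
	unsigned int  page_shift = 16;	/* assume 64K system pages */

	unsigned long old_way = (fw_pages + (1UL << (page_shift - 12)) - 1) >> (page_shift - 12);
	unsigned long new_way = ALIGN(fw_pages, 1UL << (page_shift - 12)) >> (page_shift - 12);

	printf("%lu %lu\n", old_way, new_way);	/* both print 19 */
	return 0;
}
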
drivers/infiniband/hw/mthca/mthca_dev.h

@@ -520,6 +520,7 @@ int mthca_create_ah(struct mthca_dev *dev,
 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header);
+int mthca_ah_grh_present(struct mthca_ah *ah);
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

drivers/infiniband/hw/mthca/mthca_eq.c

@@ -45,6 +45,7 @@
 enum {
 	MTHCA_NUM_ASYNC_EQE = 0x80,
 	MTHCA_NUM_CMD_EQE   = 0x80,
+	MTHCA_NUM_SPARE_EQE = 0x80,
 	MTHCA_EQ_ENTRY_SIZE = 0x20
 };
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 {
 	struct mthca_eqe *eqe;
 	int disarm_cqn;
-	int  eqes_found = 0;
+	int eqes_found = 0;
+	int set_ci = 0;
 
 	while ((eqe = next_eqe_sw(eq))) {
-		int set_ci = 0;
-
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				  be16_to_cpu(eqe->event.cmd.token),
 				  eqe->event.cmd.status,
 				  be64_to_cpu(eqe->event.cmd.out_param));
-			/*
-			 * cmd_event() may add more commands.
-			 * The card will think the queue has overflowed if
-			 * we don't tell it we've been processing events.
-			 */
-			set_ci = 1;
 			break;
 
 		case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		set_eqe_hw(eqe);
 		++eq->cons_index;
 		eqes_found = 1;
+		++set_ci;
 
-		if (unlikely(set_ci)) {
+		/*
+		 * The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events.  We
+		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
 			/*
 			 * Conditional on hca_type is OK here because
 			 * this is a rare case, not the fast path.
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
 		128 : dev->eq_table.inta_pin;
 
-	err = mthca_create_eq(dev, dev->limits.num_cqs,
+	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
 	if (err)
 		goto err_out_unmap;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
 	if (err)
 		goto err_out_comp;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
 	if (err)

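Aside (illustrative, not part of the commit): the mthca_eq.c hunks change the consumer-index policy from ringing the doorbell only after command-completion events to counting every EQE and ringing at least once per MTHCA_NUM_SPARE_EQE, with each event queue allocated that many spare entries so the hardware cannot see an overflow in between. A self-contained sketch of that counting pattern, using stand-in helpers rather than the real driver internals:

#include <stdio.h>

#define NUM_SPARE_EQE 0x80	/* mirrors MTHCA_NUM_SPARE_EQE */

/* Stand-ins for the driver internals, for illustration only. */
static int  pending = 300;	/* pretend 300 events are queued */
static int  next_eqe_pending(void)     { return pending-- > 0; }
static void process_one_eqe(void)      { }
static void write_consumer_index(void) { printf("doorbell\n"); }

static void poll_eq_sketch(void)
{
	int set_ci = 0;

	while (next_eqe_pending()) {
		process_one_eqe();
		++set_ci;

		/*
		 * Update the consumer index at least every NUM_SPARE_EQE
		 * entries; the EQ was created with that many spare slots,
		 * so the hardware cannot see the queue as overflowed
		 * between updates.
		 */
		if (set_ci >= NUM_SPARE_EQE) {
			write_consumer_index();
			set_ci = 0;
		}
	}

	if (set_ci)
		write_consumer_index();	/* final update after the loop */
}

int main(void)
{
	poll_eq_sketch();
	return 0;
}
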
drivers/infiniband/hw/mthca/mthca_provider.c

@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
+ * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
  */
 
 #include <rdma/ib_smi.h>
@@ -45,6 +45,14 @@
 #include "mthca_user.h"
 #include "mthca_memfree.h"
 
+static void init_query_mad(struct ib_smp *mad)
+{
+	mad->base_version  = 1;
+	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+	mad->class_version = 1;
+	mad->method        = IB_MGMT_METHOD_GET;
+}
+
 static int mthca_query_device(struct ib_device *ibdev,
 			      struct ib_device_attr *props)
 {
@@ -55,7 +63,7 @@ static int mthca_query_device(struct ib_device *ibdev,
 	u8 status;
 
-	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
@@ -64,12 +72,8 @@ static int mthca_query_device(struct ib_device *ibdev,
 
 	props->fw_ver              = mdev->fw_ver;
 
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version       = 1;
-	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version      = 1;
-	in_mad->method             = IB_MGMT_METHOD_GET;
-	in_mad->attr_id            = IB_SMP_ATTR_NODE_INFO;
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
 	err = mthca_MAD_IFC(mdev, 1, 1,
 			    1, NULL, NULL, in_mad, out_mad,
@@ -87,7 +91,6 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
 	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
-	memcpy(&props->node_guid,      out_mad->data + 12, 8);
 
 	props->max_mr_size         = ~0ull;
 	props->page_size_cap       = mdev->limits.page_size_cap;
@@ -128,20 +131,16 @@ static int mthca_query_port(struct ib_device *ibdev,
 	int err = -ENOMEM;
 	u8 status;
 
-	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
 
 	memset(props, 0, sizeof *props);
 
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version       = 1;
-	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version      = 1;
-	in_mad->method             = IB_MGMT_METHOD_GET;
-	in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod           = cpu_to_be32(port);
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
 
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -220,18 +219,14 @@ static int mthca_query_pkey(struct ib_device *ibdev,
 	int err = -ENOMEM;
 	u8 status;
 
-	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
 
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version       = 1;
-	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version      = 1;
-	in_mad->method             = IB_MGMT_METHOD_GET;
-	in_mad->attr_id            = IB_SMP_ATTR_PKEY_TABLE;
-	in_mad->attr_mod           = cpu_to_be32(index / 32);
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
+	in_mad->attr_mod = cpu_to_be32(index / 32);
 
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -259,18 +254,14 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	int err = -ENOMEM;
 	u8 status;
 
-	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
 		goto out;
 
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version       = 1;
-	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version      = 1;
-	in_mad->method             = IB_MGMT_METHOD_GET;
-	in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod           = cpu_to_be32(port);
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
 
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -284,13 +275,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	memcpy(gid->raw, out_mad->data + 8, 8);
 
-	memset(in_mad, 0, sizeof *in_mad);
-	in_mad->base_version       = 1;
-	in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	in_mad->class_version      = 1;
-	in_mad->method             = IB_MGMT_METHOD_GET;
-	in_mad->attr_id            = IB_SMP_ATTR_GUID_INFO;
-	in_mad->attr_mod           = cpu_to_be32(index / 8);
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
+	in_mad->attr_mod = cpu_to_be32(index / 8);
 
 	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
 			    port, NULL, NULL, in_mad, out_mad,
@@ -458,8 +445,10 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
 	if (pd->uobject) {
 		context = to_mucontext(pd->uobject->context);
 
-		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-			return ERR_PTR(-EFAULT);
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+			err = -EFAULT;
+			goto err_free;
+		}
 
 		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 					context->db_tab, ucmd.db_index,
@@ -535,8 +524,10 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 		if (pd->uobject) {
 			context = to_mucontext(pd->uobject->context);
 
-			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+				kfree(qp);
 				return ERR_PTR(-EFAULT);
+			}
 
 			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
 						context->db_tab,
@@ -783,24 +774,20 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
 	if ((*iova_start & ~PAGE_MASK) !=
 	    (buffer_list[0].addr & ~PAGE_MASK))
 		return ERR_PTR(-EINVAL);
 
-	if (num_phys_buf > 1 &&
-	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
-		return ERR_PTR(-EINVAL);
-
 	mask = 0;
 	total_size = 0;
 	for (i = 0; i < num_phys_buf; ++i) {
-		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
-			return ERR_PTR(-EINVAL);
-		if (i != 0 && i != num_phys_buf - 1 &&
-		    (buffer_list[i].size & ~PAGE_MASK))
-			return ERR_PTR(-EINVAL);
+		if (i != 0)
+			mask |= buffer_list[i].addr;
+		if (i != num_phys_buf - 1)
+			mask |= buffer_list[i].addr + buffer_list[i].size;
 
 		total_size += buffer_list[i].size;
-		if (i > 0)
-			mask |= buffer_list[i].addr;
 	}
 
+	if (mask & ~PAGE_MASK)
+		return ERR_PTR(-EINVAL);
+
 	/* Find largest page shift we can use to cover buffers */
 	for (shift = PAGE_SHIFT; shift < 31; ++shift)
 		if (num_phys_buf > 1) {
@@ -1070,11 +1057,48 @@ static struct class_device_attribute *mthca_class_attributes[] = {
 	&class_device_attr_board_id
 };
 
+static int mthca_init_node_data(struct mthca_dev *dev)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+	u8 status;
+
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+	err = mthca_MAD_IFC(dev, 1, 1,
+			    1, NULL, NULL, in_mad, out_mad,
+			    &status);
+	if (err)
+		goto out;
+	if (status) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
 int mthca_register_device(struct mthca_dev *dev)
 {
 	int ret;
 	int i;
 
+	ret = mthca_init_node_data(dev);
+	if (ret)
+		return ret;
+
 	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner                = THIS_MODULE;

drivers/infiniband/hw/mthca/mthca_qp.c

@@ -1434,7 +1434,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 	u16 pkey;
 
 	ib_ud_header_init(256, /* assume a MAD */
-			  sqp->ud_header.grh_present,
+			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
 			  &sqp->ud_header);
 
 	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);

drivers/infiniband/ulp/ipoib/ipoib.h

@@ -45,11 +45,11 @@
 #include <linux/config.h>
 #include <linux/kref.h>
 #include <linux/if_infiniband.h>
+#include <linux/mutex.h>
 
 #include <net/neighbour.h>
 
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
@@ -123,8 +123,8 @@ struct ipoib_dev_priv {
 
 	unsigned long flags;
 
-	struct semaphore mcast_mutex;
-	struct semaphore vlan_mutex;
+	struct mutex mcast_mutex;
+	struct mutex vlan_mutex;
 
 	struct rb_root  path_tree;
 	struct list_head path_list;

drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -52,7 +52,7 @@ MODULE_PARM_DESC(data_debug_level,
 
 #define	IPOIB_OP_RECV   (1ul << 31)
 
-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 				 struct ib_pd *pd, struct ib_ah_attr *attr)
@@ -445,25 +445,16 @@ int ipoib_ib_dev_down(struct net_device *dev)
 
 	/* Shutdown the P_Key thread if still active */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
 		cancel_delayed_work(&priv->pkey_task);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		flush_workqueue(ipoib_workqueue);
 	}
 
 	ipoib_mcast_stop_thread(dev, 1);
-
-	/*
-	 * Flush the multicast groups first so we stop any multicast joins. The
-	 * completion thread may have already died and we may deadlock waiting
-	 * for the completion thread to finish some multicast joins.
-	 */
 	ipoib_mcast_dev_flush(dev);
 
-	/* Delete broadcast and local addresses since they will be recreated */
-	ipoib_mcast_dev_down(dev);
-
 	ipoib_flush_paths(dev);
 
 	return 0;
@@ -608,13 +599,13 @@ void ipoib_ib_dev_flush(void *_dev)
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);
 
-	down(&priv->vlan_mutex);
+	mutex_lock(&priv->vlan_mutex);
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);
 
-	up(&priv->vlan_mutex);
+	mutex_unlock(&priv->vlan_mutex);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -624,9 +615,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 
 	ipoib_mcast_stop_thread(dev, 1);
-
-	/* Delete the broadcast address and the local address */
-	ipoib_mcast_dev_down(dev);
+	ipoib_mcast_dev_flush(dev);
 
 	ipoib_transport_dev_cleanup(dev);
 }
@@ -662,12 +651,12 @@ void ipoib_pkey_poll(void *dev_ptr)
 	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 		ipoib_open(dev);
 	else {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->pkey_task,
 					   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 	}
 }
@@ -681,12 +670,12 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev)
 
 	/* P_Key value not assigned yet - start polling */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->pkey_task,
 				   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		return 1;
 	}

drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -105,7 +105,7 @@ int ipoib_open(struct net_device *dev)
 		struct ipoib_dev_priv *cpriv;
 
 		/* Bring up any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
@@ -115,7 +115,7 @@ int ipoib_open(struct net_device *dev)
 
 			dev_change_flags(cpriv->dev, flags | IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 
 	netif_start_queue(dev);
@@ -140,7 +140,7 @@ static int ipoib_stop(struct net_device *dev)
 		struct ipoib_dev_priv *cpriv;
 
 		/* Bring down any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
@@ -150,7 +150,7 @@ static int ipoib_stop(struct net_device *dev)
 
 			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 
 	return 0;
@@ -892,8 +892,8 @@ static void ipoib_setup(struct net_device *dev)
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
 
-	init_MUTEX(&priv->mcast_mutex);
-	init_MUTEX(&priv->vlan_mutex);
+	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->vlan_mutex);
 
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);

drivers/infiniband/ulp/ipoib/ipoib_multicast.c

@@ -55,7 +55,7 @@ MODULE_PARM_DESC(mcast_debug_level,
 		 "Enable multicast debug tracing if > 0");
 #endif
 
-static DECLARE_MUTEX(mcast_mutex);
+static DEFINE_MUTEX(mcast_mutex);
 
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
@@ -97,8 +97,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
 	unsigned long flags;
-	LIST_HEAD(ah_list);
-	struct ipoib_ah *ah, *tah;
 
 	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group " IPOIB_GID_FMT "\n",
@@ -107,8 +105,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
+		/*
+		 * It's safe to call ipoib_put_ah() inside priv->lock
+		 * here, because we know that mcast->ah will always
+		 * hold one more reference, so ipoib_put_ah() will
+		 * never do more than decrement the ref count.
+		 */
 		if (neigh->ah)
-			list_add_tail(&neigh->ah->list, &ah_list);
+			ipoib_put_ah(neigh->ah);
 		*to_ipoib_neigh(neigh->neighbour) = NULL;
 		neigh->neighbour->ops->destructor = NULL;
 		kfree(neigh);
@@ -116,9 +120,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	list_for_each_entry_safe(ah, tah, &ah_list, list)
-		ipoib_put_ah(ah);
-
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
@@ -384,10 +385,10 @@ static void ipoib_mcast_join_complete(int status,
 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
 		mcast->backoff = 1;
-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 		complete(&mcast->done);
 		return;
 	}
@@ -417,7 +418,7 @@ static void ipoib_mcast_join_complete(int status,
 
 	mcast->query = NULL;
 
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
 		if (status == -ETIMEDOUT)
 			queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -426,7 +427,7 @@ static void ipoib_mcast_join_complete(int status,
 					   mcast->backoff * HZ);
 	} else
 		complete(&mcast->done);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 
 	return;
 }
@@ -481,12 +482,12 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
 			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 					   mcast->backoff * HZ);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
 }
@@ -519,11 +520,11 @@ void ipoib_mcast_join_task(void *dev_ptr)
 		priv->broadcast = ipoib_mcast_alloc(dev, 1);
 		if (!priv->broadcast) {
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
-			down(&mcast_mutex);
+			mutex_lock(&mcast_mutex);
 			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 				queue_delayed_work(ipoib_workqueue,
 						   &priv->mcast_task, HZ);
-			up(&mcast_mutex);
+			mutex_unlock(&mcast_mutex);
 			return;
 		}
@@ -579,10 +580,10 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
 	ipoib_dbg_mcast(priv, "starting multicast thread\n");
 
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
 		queue_work(ipoib_workqueue, &priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 
 	return 0;
 }
@@ -594,10 +595,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 
 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
 	cancel_delayed_work(&priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);
 
 	if (flush)
 		flush_workqueue(ipoib_workqueue);
@@ -741,48 +742,23 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	LIST_HEAD(remove_list);
-	struct ipoib_mcast *mcast, *tmcast, *nmcast;
+	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;
 
 	ipoib_dbg_mcast(priv, "flushing multicast list\n");
 
 	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->flags =
-				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);
 
-			nmcast->mcmember.mgid = mcast->mcmember.mgid;
-
-			/* Add the new group in before the to-be-destroyed group */
-			list_add_tail(&nmcast->list, &mcast->list);
-			list_del_init(&mcast->list);
-
-			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
-					&priv->multicast_tree);
-
-			list_add_tail(&mcast->list, &remove_list);
-		} else {
-			ipoib_warn(priv, "could not reallocate multicast group "
-				   IPOIB_GID_FMT "\n",
-				   IPOIB_GID_ARG(mcast->mcmember.mgid));
-		}
+	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+		list_del(&mcast->list);
+		rb_erase(&mcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&mcast->list, &remove_list);
 	}
 
 	if (priv->broadcast) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;
-
-			rb_replace_node(&priv->broadcast->rb_node,
-					&nmcast->rb_node,
-					&priv->multicast_tree);
-
-			list_add_tail(&priv->broadcast->list, &remove_list);
-		}
-
-		priv->broadcast = nmcast;
+		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&priv->broadcast->list, &remove_list);
+		priv->broadcast = NULL;
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -793,24 +769,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 }
 
-void ipoib_mcast_dev_down(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-
-	/* Delete broadcast since it will be recreated */
-	if (priv->broadcast) {
-		ipoib_dbg_mcast(priv, "deleting broadcast group\n");
-
-		spin_lock_irqsave(&priv->lock, flags);
-		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
-		spin_unlock_irqrestore(&priv->lock, flags);
-
-		ipoib_mcast_leave(dev, priv->broadcast);
-		ipoib_mcast_free(priv->broadcast);
-		priv->broadcast = NULL;
-	}
-}
-
 void ipoib_mcast_restart_task(void *dev_ptr)
 {
 	struct net_device *dev = dev_ptr;
@@ -824,7 +782,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_mcast_stop_thread(dev, 0);
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&dev->xmit_lock, flags);
+	spin_lock(&priv->lock);
 
 	/*
 	 * Unfortunately, the networking core only gives us a list of all of
@@ -896,7 +855,9 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 			list_add_tail(&mcast->list, &remove_list);
 		}
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&dev->xmit_lock, flags);
 
 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {

drivers/infiniband/ulp/ipoib/ipoib_verbs.c

@@ -65,9 +65,9 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	}
 
 	/* attach QP to multicast group */
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_attach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
@@ -81,9 +81,9 @@ int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
 
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_detach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);

drivers/infiniband/ulp/ipoib/ipoib_vlan.c

@@ -63,7 +63,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	ppriv = netdev_priv(pdev);
 
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 
 	/*
 	 * First ensure this isn't a duplicate. We check the parent device and
@@ -124,7 +124,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	list_add_tail(&priv->list, &ppriv->child_intfs);
 
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 
 	return 0;
@@ -139,7 +139,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	free_netdev(priv->dev);
 
 err:
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return result;
 }
@@ -153,7 +153,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	ppriv = netdev_priv(pdev);
 
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
@@ -167,7 +167,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 			break;
 		}
 	}
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 
 	return ret;
 }

drivers/infiniband/ulp/srp/ib_srp.c

@@ -1516,8 +1516,7 @@ static ssize_t show_port(struct class_device *class_dev, char *buf)
 
 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
 
-static struct srp_host *srp_add_port(struct ib_device *device,
-				     __be64 node_guid, u8 port)
+static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 {
 	struct srp_host *host;
@@ -1532,7 +1531,7 @@ static struct srp_host *srp_add_port(struct ib_device *device,
 	host->port = port;
 
 	host->initiator_port_id[7] = port;
-	memcpy(host->initiator_port_id + 8, &node_guid, 8);
+	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
 
 	host->pd   = ib_alloc_pd(device);
 	if (IS_ERR(host->pd))
@@ -1580,22 +1579,11 @@ static void srp_add_one(struct ib_device *device)
 {
 	struct list_head *dev_list;
 	struct srp_host *host;
-	struct ib_device_attr *dev_attr;
 	int s, e, p;
 
-	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
-	if (!dev_attr)
-		return;
-
-	if (ib_query_device(device, dev_attr)) {
-		printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
-		       device->name);
-		goto out;
-	}
-
 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
 	if (!dev_list)
-		goto out;
+		return;
 
 	INIT_LIST_HEAD(dev_list);
@@ -1608,15 +1596,12 @@ static void srp_add_one(struct ib_device *device)
 	}
 
 	for (p = s; p <= e; ++p) {
-		host = srp_add_port(device, dev_attr->node_guid, p);
+		host = srp_add_port(device, p);
 		if (host)
 			list_add_tail(&host->list, dev_list);
 	}
 
 	ib_set_client_data(device, &srp_client, dev_list);
-
-out:
-	kfree(dev_attr);
 }
 
 static void srp_remove_one(struct ib_device *device)

include/rdma/ib_verbs.h

@@ -88,7 +88,6 @@ enum ib_atomic_cap {
 
 struct ib_device_attr {
 	u64			fw_ver;
-	__be64			node_guid;
 	__be64			sys_image_guid;
 	u64			max_mr_size;
 	u64			page_size_cap;
@@ -951,6 +950,7 @@ struct ib_device {
 	u64			     uverbs_cmd_mask;
 	int			     uverbs_abi_ver;
 
+	__be64			     node_guid;
 	u8                           node_type;
 	u8                           phys_port_cnt;
 };

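Aside (illustrative, not from the patch): the ib_verbs.h change above is the common thread of the cm.c, sysfs.c and ib_srp.c hunks. Consumers no longer allocate an ib_device_attr and call ib_query_device() just to learn the node GUID; the value is cached in struct ib_device at registration time (mthca fills it in via mthca_init_node_data()). Roughly, the before-and-after access pattern looks like the sketch below; note the "before" helper no longer compiles once node_guid is gone from ib_device_attr:

/* Before this merge: query the provider and copy the GUID out of the attr. */
static __be64 example_get_guid_old(struct ib_device *device)
{
	struct ib_device_attr *attr;
	__be64 guid = 0;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return 0;
	if (!ib_query_device(device, attr))
		guid = attr->node_guid;		/* field removed by this merge */
	kfree(attr);
	return guid;
}

/* After this merge: read the cached value directly. */
static __be64 example_get_guid_new(struct ib_device *device)
{
	return device->node_guid;
}
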