Commit 3bdad2d1 authored Sep 22, 2014 by Roland Dreier

    Merge branches 'core', 'ipoib', 'iser', 'mlx4', 'ocrdma' and 'qib' into for-next

Parents: 87773dd5 68f9d83c 61aabb3c 25476b02 f0c2c225 85cbb7c7
Showing 17 changed files with 263 additions and 149 deletions (+263 −149).
drivers/infiniband/core/uverbs_marshall.c        +4   −0
drivers/infiniband/hw/ipath/ipath_user_pages.c   +3   −3
drivers/infiniband/hw/mlx4/main.c                +115 −54
drivers/infiniband/hw/mlx4/mlx4_ib.h             +1   −0
drivers/infiniband/hw/mlx4/mr.c                  +5   −2
drivers/infiniband/hw/mlx4/qp.c                  +38  −22
drivers/infiniband/hw/ocrdma/ocrdma_ah.c         +31  −12
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c      +2   −4
drivers/infiniband/hw/qib/qib_debugfs.c          +2   −1
drivers/infiniband/hw/qib/qib_qp.c               +0   −8
drivers/infiniband/hw/qib/qib_user_pages.c       +3   −3
drivers/infiniband/ulp/ipoib/ipoib_multicast.c   +1   −9
drivers/infiniband/ulp/iser/iscsi_iser.c         +14  −5
drivers/infiniband/ulp/iser/iscsi_iser.h         +1   −1
drivers/infiniband/ulp/iser/iser_verbs.c         +14  −10
drivers/net/ethernet/mellanox/mlx4/mr.c          +22  −11
drivers/net/ethernet/mellanox/mlx4/port.c        +7   −4
drivers/infiniband/core/uverbs_marshall.c
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
 	dst->packet_life_time		= src->packet_life_time;
 	dst->preference			= src->preference;
 	dst->packet_life_time_selector	= src->packet_life_time_selector;
+
+	memset(dst->smac, 0, sizeof(dst->smac));
+	memset(dst->dmac, 0, sizeof(dst->dmac));
+	dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-				  struct page **p, struct vm_area_struct **vma)
+				  struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 	down_write(&current->mm->mmap_sem);
-	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __ipath_get_user_pages(start_page, num_pages, p);
 	up_write(&current->mm->mmap_sem);
drivers/infiniband/hw/mlx4/main.c (+115 −54): this diff is collapsed on the page and is not reproduced here.
drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
 	struct net_device      *masters[MLX4_MAX_PORTS];
+	atomic64_t		mac[MLX4_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
 	struct notifier_block	nb_inet6;
drivers/infiniband/hw/mlx4/mr.c
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 					       0);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
 		shift = ilog2(mmr->umem->page_size);
 
-		mmr->mmr.iova       = virt_addr;
-		mmr->mmr.size       = length;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
 					      *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			ib_umem_release(mmr->umem);
 			goto release_mpt_entry;
 		}
+		mmr->mmr.iova       = virt_addr;
+		mmr->mmr.size       = length;
 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
 		if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	 * return a failure. But dereg_mr will free the resources.
 	 */
 	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
 	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
 			pr_warn("modify QP %06x to RESET failed.\n",
 				qp->mqp.qpn);
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			 * If one was already assigned, but the new mac differs,
 			 * unregister the old one and register the new one.
 			 */
-			if (!smac_info->smac || smac_info->smac != smac) {
+			if ((!smac_info->smac && !smac_info->smac_port) ||
+			    smac_info->smac != smac) {
 				/* register candidate now, unreg if needed, after success */
 				smac_index = mlx4_register_mac(dev->dev, port, smac);
 				if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
 				    struct mlx4_qp_context *context)
 {
-	struct net_device *ndev;
 	u64 u64_mac;
 	int smac_index;
 
-	ndev = dev->iboe.netdevs[qp->port - 1];
-	if (ndev) {
-		smac = ndev->dev_addr;
-		u64_mac = mlx4_mac_to_u64(smac);
-	} else {
-		u64_mac = dev->dev->caps.def_mac[qp->port];
-	}
+	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-	if (!qp->pri.smac) {
+	if (!qp->pri.smac && !qp->pri.smac_port) {
 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
 		if (smac_index >= 0) {
 			qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	int steer_qp = 0;
 	int err = -EINVAL;
 
+	/* APM is not supported under RoCE */
+	if (attr_mask & IB_QP_ALT_PATH &&
+	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return -ENOTSUPP;
+
 	context = kzalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
@@ -1780,9 +1780,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			if (qp->flags & MLX4_IB_QP_NETIF)
 				mlx4_ib_steer_qp_reg(dev, qp, 0);
 		}
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1806,11 +1807,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (err && steer_qp)
 		mlx4_ib_steer_qp_reg(dev, qp, 0);
 	kfree(context);
-	if (qp->pri.candidate_smac) {
+	if (qp->pri.candidate_smac ||
+	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
 		if (err) {
 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
 		} else {
-			if (qp->pri.smac)
+			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = qp->pri.candidate_smac;
 			qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2083,6 +2085,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+	int i;
+
+	for (i = ETH_ALEN; i; i--) {
+		dst_mac[i - 1] = src_mac & 0xff;
+		src_mac >>= 8;
+	}
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
@@ -2197,7 +2209,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	}
 
 	if (is_eth) {
-		u8 *smac;
 		struct in6_addr in6;
 
 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2210,12 +2221,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
 		memcpy(&in6, sgid.raw, sizeof(in6));
 
-		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-			smac = to_mdev(sqp->qp.ibqp.device)->
-				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-		else	/* use the src mac of the tunnel */
-			smac = ah->av.eth.s_mac;
-		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+			u8 smac[ETH_ALEN];
+
+			mlx4_u64_to_smac(smac, mac);
+			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+		} else {
+			/* use the src mac of the tunnel */
+			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+		}
+
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
 		if (!is_vlan) {
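The qp.c hunks above stop reading the source MAC from iboe.netdevs[] at send time and instead take it from the new per-port atomic64 mac[] cache, unpacking it with the added mlx4_u64_to_smac() helper. As a standalone userspace sketch (not part of the commit; the sample MAC value and the stdio wrapper are illustrative only), the following mirrors how that helper unpacks a u64 into the 6-byte MAC, most significant byte first:

/*
 * Illustration only: mimics the byte-unpacking loop of the new
 * mlx4_u64_to_smac() helper shown in the diff above.  ETH_ALEN is
 * assumed to be 6, as in <linux/if_ether.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void u64_to_smac(uint8_t *dst_mac, uint64_t src_mac)
{
	int i;

	/* Fill dst_mac[5] .. dst_mac[0] from the low byte upward. */
	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	u64_to_smac(mac, 0x0002c9aabbccULL);	/* made-up sample value */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

Compiled with any C99 compiler, this prints 00:02:c9:aa:bb:cc for the sample value, i.e. the byte order that the diff then copies into the UD header's smac_h field.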
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT	0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-			      struct ib_ah_attr *attr, int pdid)
+			      struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
 	int status = 0;
 	u16 vlan_tag;
 	bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	memset(&eth, 0, sizeof(eth));
 	memset(&grh, 0, sizeof(grh));
 
-	ah->sgid_index = attr->grh.sgid_index;
-
+	/* VLAN */
 	vlan_tag = attr->vlan_id;
 	if (!vlan_tag || (vlan_tag > 0xFFF))
 		vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
 		eth_sz = sizeof(struct ocrdma_eth_basic);
 	}
+	/* MAC */
 	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-	memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
 	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
 	if (status)
 		return status;
-	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-				  (union ib_gid *)&grh.sgid[0]);
-	if (status)
-		return status;
+	ah->sgid_index = attr->grh.sgid_index;
+	memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
 	grh.tclass_flow = cpu_to_be32((6 << 28) |
 			(attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	/* 0x1b is next header value in GRH */
 	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
 			(0x1b << 8) | attr->grh.hop_limit);
-
-	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+	/* Eth HDR */
 	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
 	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
 	if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	struct ocrdma_ah *ah;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+	union ib_gid sgid;
+	u8 zmac[ETH_ALEN];
 
 	if (!(attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	status = ocrdma_alloc_av(dev, ah);
 	if (status)
 		goto av_err;
-	status = set_av_attr(dev, ah, attr, pd->id);
+
+	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+	if (status) {
+		pr_err("%s(): Failed to query sgid, status = %d\n",
+		       __func__, status);
+		goto av_conf_err;
+	}
+
+	memset(&zmac, 0, ETH_ALEN);
+	if (pd->uctx &&
+	    memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+		status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+						    attr->dmac, &attr->vlan_id);
+		if (status) {
+			pr_err("%s(): Failed to resolve dmac from gid."
+			       "status = %d\n", __func__, status);
+			goto av_conf_err;
+		}
+	}
+
+	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
 	if (status)
 		goto av_conf_err;
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
 	struct ocrdma_av *av = ah->av;
 	struct ocrdma_grh *grh;
 	attr->ah_flags |= IB_AH_GRH;
-	if (ah->av->valid & Bit(1)) {
+	if (ah->av->valid & OCRDMA_AV_VALID) {
 		grh = (struct ocrdma_grh *)((u8 *)ah->av +
 				sizeof(struct ocrdma_eth_vlan));
 		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
 	attr->max_srq_sge = dev->attr.max_srq_sge;
 	attr->max_srq_wr = dev->attr.max_rqe;
 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-	attr->max_fast_reg_page_list_len = 0;
+	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
 	attr->max_pkeys = 1;
 	return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq->first_arm) {
 		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 		cq->first_arm = false;
-		goto skip_defer;
 	}
-	cq->deferred_arm = true;
 
-skip_defer:
+	cq->deferred_arm = true;
 	cq->deferred_sol = sol_needed;
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
drivers/infiniband/hw/qib/qib_debugfs.c
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
 	struct qib_qp_iter *iter;
 	loff_t n = *pos;
 
+	rcu_read_lock();
 	iter = qib_qp_iter_init(s->private);
 	if (!iter)
 		return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-	/* nothing for now */
+	rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
drivers/infiniband/hw/qib/qib_qp.c
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 	struct qib_qp *pqp = iter->qp;
 	struct qib_qp *qp;
 
-	rcu_read_lock();
 	for (; n < dev->qp_table_size; n++) {
 		if (pqp)
 			qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 			qp = rcu_dereference(dev->qp_table[n]);
 		pqp = qp;
 		if (qp) {
-			if (iter->qp)
-				atomic_dec(&iter->qp->refcount);
-			atomic_inc(&qp->refcount);
-			rcu_read_unlock();
 			iter->qp = qp;
 			iter->n = n;
 			return 0;
 		}
 	}
-	rcu_read_unlock();
-	if (iter->qp)
-		atomic_dec(&iter->qp->refcount);
 	return ret;
 }
drivers/infiniband/hw/qib/qib_user_pages.c
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p, struct vm_area_struct **vma)
+				struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 	down_write(&current->mm->mmap_sem);
-	ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __qib_get_user_pages(start_page, num_pages, p);
 	up_write(&current->mm->mmap_sem);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
 			  port_attr.state);
 		return;
 	}
+	priv->local_lid = port_attr.lid;
 
 	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
 		ipoib_warn(priv, "ib_query_gid() failed\n");
 	else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-	{
-		struct ib_port_attr attr;
-
-		if (!ib_query_port(priv->ca, priv->port, &attr))
-			priv->local_lid = attr.lid;
-		else
-			ipoib_warn(priv, "ib_query_port failed\n");
-	}
-
 	if (!priv->broadcast) {
 		struct ipoib_mcast *broadcast;
drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 		     int is_leading)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session;
 	struct iser_conn *ib_conn;
 	struct iscsi_endpoint *ep;
 	int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	}
 	ib_conn = ep->dd_data;
 
-	session = conn->session;
-	if (iser_alloc_rx_descriptors(ib_conn, session))
-		return -ENOMEM;
+	mutex_lock(&ib_conn->state_mutex);
+	if (ib_conn->state != ISER_CONN_UP) {
+		error = -EINVAL;
+		iser_err("iser_conn %p state is %d, teardown started\n",
+			 ib_conn, ib_conn->state);
+		goto out;
+	}
+
+	error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+	if (error)
+		goto out;
 
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	conn->dd_data = ib_conn;
 	ib_conn->iscsi_conn = conn;
 
-	return 0;
+out:
+	mutex_unlock(&ib_conn->state_mutex);
+	return error;
 }
 
 static int
drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"1.4"
+#define DRV_VER		"1.4.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
drivers/infiniband/ulp/iser/iser_verbs.c
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
 	struct iser_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr = &device->dev_attr;
-	int ret, i, j;
+	int ret, i;
 
 	ret = ib_query_device(device->ib_device, dev_attr);
 	if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
 					  iser_cq_event_callback,
 					  (void *)&cq_desc[i],
 					  ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->rx_cq[i]))
+		if (IS_ERR(device->rx_cq[i])) {
+			device->rx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		device->tx_cq[i] = ib_create_cq(device->ib_device,
 					  NULL, iser_cq_event_callback,
 					  (void *)&cq_desc[i],
 					  ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->tx_cq[i]))
+		if (IS_ERR(device->tx_cq[i])) {
+			device->tx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
 			goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	for (j = 0; j < device->cqs_used; j++)
-		tasklet_kill(&device->cq_tasklet[j]);
+	for (i = 0; i < device->cqs_used; i++)
+		tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-	for (j = 0; j < i; j++) {
-		if (device->tx_cq[j])
-			ib_destroy_cq(device->tx_cq[j]);
-		if (device->rx_cq[j])
-			ib_destroy_cq(device->rx_cq[j]);
+	for (i = 0; i < device->cqs_used; i++) {
+		if (device->tx_cq[i])
+			ib_destroy_cq(device->tx_cq[i]);
+		if (device->rx_cq[i])
+			ib_destroy_cq(device->rx_cq[i]);
 	}
 	ib_dealloc_pd(device->pd);
 pd_err:
drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
 	struct mlx4_cmd_mailbox *mailbox = NULL;
 
-	/* Make sure that at this point we have single-threaded access only */
-
 	if (mmr->enabled != MLX4_MPT_EN_HW)
 		return -EINVAL;
 
 	err = mlx4_HW2SW_MPT(dev, NULL, key);
-
 	if (err) {
 		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
 		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 				   0, MLX4_CMD_QUERY_MPT,
 				   MLX4_CMD_TIME_CLASS_B,
 				   MLX4_CMD_WRAPPED);
-
 		if (err)
 			goto free_mailbox;
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
-	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-	if (!err)
+	if (!err) {
+		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
 		mmr->enabled = MLX4_MPT_EN_HW;
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
 			 u32 pdn)
 {
-	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
 	/* The wrapper function will put the slave's id here */
 	if (mlx4_is_mfunc(dev))
 		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
-					  (pdn & MLX4_MPT_PD_MASK)
+
+	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
+					  (pdn & MLX4_MPT_PD_MASK)
 					  | MLX4_MPT_PD_FLAG_EN_INV);
 	return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
 	int err;
 
-	mpt_entry->start       = cpu_to_be64(mr->iova);
-	mpt_entry->length      = cpu_to_be64(mr->size);
-	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+	mpt_entry->start       = cpu_to_be64(iova);
+	mpt_entry->length      = cpu_to_be64(size);
+	mpt_entry->entity_size = cpu_to_be32(page_shift);
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
 		return err;
 
+	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+					   MLX4_MPT_PD_FLAG_EN_INV);
+	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+					   MLX4_MPT_FLAG_SW_OWNS);
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 		if (mr->mtt.page_shift == 0)
 			mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
 	}
+
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+						   MLX4_MPT_PD_FLAG_RAE);
+	} else {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 	mr->enabled = MLX4_MPT_EN_SW;
 
 	return 0;
drivers/net/ethernet/mellanox/mlx4/port.c
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
 	int i;
 
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if ((mac & MLX4_MAC_MASK) ==
+		if (table->refs[i] &&
+		    (MLX4_MAC_MASK & mac) ==
 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
 			return i;
 	}
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if (free < 0 && !table->entries[i]) {
-			free = i;
+		if (!table->refs[i]) {
+			if (free < 0)
+				free = i;
 			continue;
 		}
 
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+		if ((MLX4_MAC_MASK & mac) ==
+		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 			/* MAC already registered, increment ref count */
 			err = i;
 			++table->refs[i];
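The port.c hunks above change the MAC-table lookups to key off the per-slot reference count rather than the raw entry value: a slot only matches (or blocks reuse) when refs[i] is non-zero. The following is a minimal standalone sketch of that rule, with a simplified table layout and made-up names (the real table stores big-endian entries and is walked under table->mutex), not code from the commit:

/*
 * Illustration only: a slot with refs == 0 is treated as free even if a
 * stale entry value is still present, mirroring the new find_index()
 * check in the diff above.
 */
#include <stdint.h>
#include <stdio.h>

#define MAC_TABLE_SIZE 8
#define MAC_MASK 0xffffffffffffULL

struct mac_table {
	uint64_t entries[MAC_TABLE_SIZE];
	int refs[MAC_TABLE_SIZE];
};

/* Return the index holding @mac, or -1; ignore slots whose refs are 0. */
static int find_index(const struct mac_table *t, uint64_t mac)
{
	for (int i = 0; i < MAC_TABLE_SIZE; i++) {
		if (t->refs[i] &&
		    (mac & MAC_MASK) == (t->entries[i] & MAC_MASK))
			return i;
	}
	return -1;
}

int main(void)
{
	struct mac_table t = { .entries = { 0x0002c9aabbccULL }, .refs = { 0 } };

	/* A stale entry with refs == 0 does not match ... */
	printf("%d\n", find_index(&t, 0x0002c9aabbccULL));	/* prints -1 */

	/* ... but the same entry with a live reference does. */
	t.refs[0] = 1;
	printf("%d\n", find_index(&t, 0x0002c9aabbccULL));	/* prints 0 */
	return 0;
}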