Commit 8d34ff34 authored Jun 14, 2009 by Roland Dreier
Merge branches 'cxgb3', 'ehca', 'misc', 'mlx4', 'mthca' and 'nes' into for-linus
Parents: 3026c19a 25a52393 5b891a93 2ac6bf4d d1fdf24b 28e43a51
Showing 24 changed files with 152 additions and 133 deletions
drivers/infiniband/hw/amso1100/c2_cq.c               +2   -2
drivers/infiniband/hw/ehca/ehca_classes_pSeries.h    +0  -28
drivers/infiniband/hw/ehca/ehca_irq.c                +4   -5
drivers/infiniband/hw/ehca/ehca_main.c               +1   -1
drivers/infiniband/hw/ehca/ehca_qp.c                +58  -54
drivers/infiniband/hw/ehca/hcp_if.c                  +3   -3
drivers/infiniband/hw/ehca/hcp_if.h                  +1   -1
drivers/infiniband/hw/ehca/hcp_phyp.c                +7   -4
drivers/infiniband/hw/ehca/hcp_phyp.h                +1   -1
drivers/infiniband/hw/ehca/ipz_pt_fn.c              +14   -5
drivers/infiniband/hw/mlx4/qp.c                      +4   -0
drivers/infiniband/hw/mthca/mthca_cmd.c              +1   -1
drivers/infiniband/hw/mthca/mthca_dev.h              +1   -0
drivers/infiniband/hw/mthca/mthca_eq.c               +3   -1
drivers/infiniband/hw/mthca/mthca_main.c            +14   -3
drivers/infiniband/hw/mthca/mthca_mr.c               +8   -8
drivers/infiniband/hw/mthca/mthca_profile.c          +2   -2
drivers/infiniband/hw/nes/nes_hw.c                   +7   -7
drivers/net/mlx4/eq.c                                +3   -1
drivers/net/mlx4/main.c                             +12   -2
drivers/net/mlx4/mr.c                                +3   -3
drivers/net/mlx4/profile.c                           +1   -1
include/linux/mlx4/device.h                          +1   -0
include/linux/mlx4/qp.h                              +1   -0
drivers/infiniband/hw/amso1100/c2_cq.c

@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
 	struct c2_qp *qp;
 	int is_recv = 0;
 
-	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+	ce = c2_mq_consume(&cq->mq);
 	if (!ce) {
 		return -EAGAIN;
 	}
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
 	while ((qp = (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
 		c2_mq_free(&cq->mq);
-		ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+		ce = c2_mq_consume(&cq->mq);
 		if (!ce)
 			return -EAGAIN;
 	}
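Both hunks in this file drop a redundant cast: c2_mq_consume() returns void *, which in C converts implicitly to any object pointer type, so the (struct c2wr_ce *) cast added nothing but noise. A minimal sketch of why the cast is unnecessary (consume() and struct item are illustrative names, not driver API):

    #include <stddef.h>

    struct item { int payload; };

    void *consume(void);                 /* returns the next element, or NULL when empty */

    int demo(void)
    {
            struct item *it = consume(); /* no cast needed from void * */

            return it ? it->payload : -1; /* NULL still means "queue empty" */
    }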
drivers/infiniband/hw/ehca/ehca_classes_pSeries.h

@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
 #define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
 #define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
-#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
 #define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
 #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
 #define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
 #define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID                              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
 #define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28, 31)
 #define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
 #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM( 8, 31)
 #define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT                    EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
 #define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
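Every define deleted here describes a value field whose bit range ends at IBM bit 31, i.e. the least significant bits of the word, so wrapping values in EHCA_BMASK_SET()/EHCA_BMASK_GET() amounted to a shift by zero; the matching ehca_qp.c hunks below now assign those fields directly. In IBM bit numbering, bit 0 is the most significant bit. A rough sketch of the convention (the IBM_FIELD_* helpers are illustrative; the real macros live in ehca_tools.h):

    #include <stdio.h>

    /*
     * IBM numbering counts from the MSB: in a 32-bit word, IBM bit 0 is
     * 1u << 31 and IBM bit 31 is 1u << 0, so a field spanning IBM bits
     * (from, to) sits at machine shift (31 - to).
     */
    #define IBM_FIELD_SHIFT(from, to)   (31 - (to))
    #define IBM_FIELD_WIDTH(from, to)   ((to) - (from) + 1)

    int main(void)
    {
            /* MQPCB_QP_STATE was EHCA_BMASK_IBM(24, 31): the low byte. */
            printf("shift=%d width=%d\n",
                   IBM_FIELD_SHIFT(24, 31), IBM_FIELD_WIDTH(24, 31));
            /* prints "shift=0 width=8" -- get/set is a plain assignment */
            return 0;
    }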
drivers/infiniband/hw/ehca/ehca_irq.c

@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
 	struct ehca_eqe *eqe;
 	u64 ret;
 
-	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+	eqe = ehca_poll_eq(shca, &shca->neq);
 
 	while (eqe) {
 		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
 			parse_ec(shca, eqe->entry);
 
-		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+		eqe = ehca_poll_eq(shca, &shca->neq);
 	}
 
 	ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	eqe_cnt = 0;
 	do {
 		u32 token;
-		eqe_cache[eqe_cnt].eqe =
-			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
+		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
 		if (!eqe_cache[eqe_cnt].eqe)
 			break;
 		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 			goto unlock_irq_spinlock;
 		do {
 			struct ehca_eqe *eqe;
-			eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+			eqe = ehca_poll_eq(shca, &shca->eq);
 			if (!eqe)
 				break;
 			process_eqe(shca, eqe);
drivers/infiniband/hw/ehca/ehca_main.c

@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
drivers/infiniband/hw/ehca/ehca_qp.c

@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
 		ib_device);
 	struct ib_ucontext *context = NULL;
 	u64 h_ret;
-	int is_llqp = 0, has_srq = 0;
+	int is_llqp = 0, has_srq = 0, is_user = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
 
 	/* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
 		}
 	}
 
-	if (pd->uobject && udata)
-		context = pd->uobject->context;
-
 	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (pd->uobject && udata) {
+		is_user = 1;
+		context = pd->uobject->context;
+	}
+
 	atomic_set(&my_qp->nr_events, 0);
 	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
 
-	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
 			goto create_qp_exit2;
 		}
 
-		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-			my_qp->ipz_squeue.qe_size;
-		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-					sizeof(struct ehca_qmap_entry));
-		if (!my_qp->sq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-				 "map ret=%i", ret);
-			goto create_qp_exit3;
+		if (!is_user) {
+			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+				my_qp->ipz_squeue.qe_size;
+			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+						    sizeof(struct ehca_qmap_entry));
+			if (!my_qp->sq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit3;
+			}
+			INIT_LIST_HEAD(&my_qp->sq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->sq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->sq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->sq_map);
 	}
 
 	if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit4;
 		}
-
-		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-			my_qp->ipz_rqueue.qe_size;
-		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-				sizeof(struct ehca_qmap_entry));
-		if (!my_qp->rq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-					"map ret=%i", ret);
-			goto create_qp_exit5;
+		if (!is_user) {
+			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+				my_qp->ipz_rqueue.qe_size;
+			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+						    sizeof(struct ehca_qmap_entry));
+			if (!my_qp->rq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit5;
+			}
+			INIT_LIST_HEAD(&my_qp->rq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->rq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->rq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->rq_map);
-	} else if (init_attr->srq) {
+	} else if (init_attr->srq && !is_user) {
 		/* this is a base QP, use the queue map of the SRQ */
 		my_qp->rq_map = my_srq->rq_map;
 		INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ static struct ehca_qp *internal_create_qp(
 		kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-	if (HAS_RQ(my_qp))
+	if (HAS_RQ(my_qp) && !is_user)
 		vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ static struct ehca_qp *internal_create_qp(
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	u64 update_mask;
 	u64 h_ret;
 	int bad_wqe_cnt = 0;
+	int is_user = 0;
 	int squeue_locked = 0;
 	unsigned long flags = 0;
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
 	}
+	if (ibqp->uobject)
+		is_user = 1;
 
 	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			goto modify_qp_exit2;
 		}
 	}
-	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+	    && !is_user) {
 		ret = check_for_left_cqes(my_qp, shca);
 		if (ret)
 			goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ipz_qeit_reset(&my_qp->ipz_rqueue);
 		ipz_qeit_reset(&my_qp->ipz_squeue);
 
-		if (qp_cur_state == IB_QPS_ERR) {
+		if (qp_cur_state == IB_QPS_ERR && !is_user) {
 			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 			if (HAS_RQ(my_qp))
 				del_from_err_list(my_qp->recv_cq,
 						  &my_qp->rq_err_node);
 		}
-		reset_queue_map(&my_qp->sq_map);
+		if (!is_user)
+			reset_queue_map(&my_qp->sq_map);
 
-		if (HAS_RQ(my_qp))
+		if (HAS_RQ(my_qp) && !is_user)
 			reset_queue_map(&my_qp->rq_map);
 	}
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
 	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
 	qp_attr->dest_qp_num = qpcb->dest_qp_nr;
 
-	qp_attr->pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
-	qp_attr->port_num =
-		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+	qp_attr->pkey_index = qpcb->prim_p_key_idx;
+	qp_attr->port_num = qpcb->prim_phys_port;
 	qp_attr->timeout = qpcb->timeout;
 	qp_attr->retry_cnt = qpcb->retry_count;
 	qp_attr->rnr_retry = qpcb->rnr_retry_count;
-
-	qp_attr->alt_pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
 	qp_attr->alt_port_num = qpcb->alt_phys_port;
 	qp_attr->alt_timeout = qpcb->timeout_al;
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
 			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-		mqpcb->curr_srq_limit =
-			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+		mqpcb->curr_srq_limit = attr->srq_limit;
 		mqpcb->qp_aff_asyn_ev_log_reg =
 			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
 	}
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
 	srq_attr->max_sge = 3;
-	srq_attr->srq_limit = EHCA_BMASK_GET(
-		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+	srq_attr->srq_limit = qpcb->curr_srq_limit;
 
 	if (ehca_debug_level >= 2)
 		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	int ret;
 	u64 h_ret;
 	u8 port_num;
+	int is_user = 0;
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 
 	if (uobject) {
+		is_user = 1;
 		if (my_qp->mm_count_galpa ||
 		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
 			ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	 * SRQs will never get into an error list and do not have a recv_cq,
 	 * so we need to skip them here.
 	 */
-	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 	/* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	if (HAS_RQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-		vfree(my_qp->rq_map.map);
+		if (!is_user)
+			vfree(my_qp->rq_map.map);
 	}
 	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-		vfree(my_qp->sq_map.map);
+		if (!is_user)
+			vfree(my_qp->sq_map.map);
 	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);
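The thread running through all of these hunks is a single is_user flag: the sq_map/rq_map bookkeeping exists only so the kernel can synthesize flush CQEs for in-kernel consumers, so for userspace QPs it is now neither allocated, reset, nor freed. Reduced to its shape (illustrative user-space types and allocators, not the driver's):

    #include <stdlib.h>

    struct qmap { size_t entries; void *map; };

    /* Allocate per-QP flush bookkeeping only for kernel-space consumers. */
    static int qmap_setup(struct qmap *q, size_t entries, int is_user)
    {
            if (is_user)
                    return 0;               /* userspace tracks its own queue state */
            q->entries = entries;
            q->map = calloc(entries, sizeof(long));
            return q->map ? 0 : -1;
    }

    static void qmap_teardown(struct qmap *q, int is_user)
    {
            if (!is_user)
                    free(q->map);           /* mirrors the conditional setup */
    }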
drivers/infiniband/hw/ehca/hcp_if.c

@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 	param->act_pages = (u32)outs[4];
 
 	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+		hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 }
 
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms)
+			     struct ehca_alloc_qp_parms *parms, int is_user)
 {
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
 	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+		hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
drivers/infiniband/hw/ehca/hcp_if.h

@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
  * initialize resources, create empty QPPTs (2 rings).
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-			     struct ehca_alloc_qp_parms *parms);
+			     struct ehca_alloc_qp_parms *parms, int is_user);
 
 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
 		      const u8 port_id,
drivers/infiniband/hw/ehca/hcp_phyp.c

@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
 	return 0;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 		    u64 paddr_kernel, u64 paddr_user)
 {
-	int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-	if (ret)
-		return ret;
+	if (!is_user) {
+		int ret = hcall_map_page(paddr_kernel,
+					 &galpas->kernel.fw_handle);
+		if (ret)
+			return ret;
+	} else
+		galpas->kernel.fw_handle = 0;
 
 	galpas->user.fw_handle = paddr_user;
drivers/infiniband/hw/ehca/hcp_phyp.h

@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
 	*(volatile u64 __force *)addr = value;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 		    u64 paddr_kernel, u64 paddr_user);
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
drivers/infiniband/hw/ehca/ipz_pt_fn.c

@@ -220,10 +220,13 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 	queue->small_page = NULL;
 
 	/* allocate queue page pointers */
-	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		ehca_gen_err("Couldn't allocate queue page list");
-		return 0;
+		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+		if (!queue->queue_pages) {
+			ehca_gen_err("Couldn't allocate queue page list");
+			return 0;
+		}
 	}
 	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
 	ehca_gen_err("Couldn't alloc pages queue=%p "
 		 "nr_of_pages=%x",  queue, nr_of_pages);
-	vfree(queue->queue_pages);
+	if (is_vmalloc_addr(queue->queue_pages))
+		vfree(queue->queue_pages);
+	else
+		kfree(queue->queue_pages);
 
 	return 0;
 }
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
 		free_page((unsigned long)queue->queue_pages[i]);
 	}
 
-	vfree(queue->queue_pages);
+	if (is_vmalloc_addr(queue->queue_pages))
+		vfree(queue->queue_pages);
+	else
+		kfree(queue->queue_pages);
 
 	return 1;
 }
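ipz_queue_ctor() now tries kmalloc() first and only falls back to vmalloc() when the physically contiguous allocation fails, with is_vmalloc_addr() selecting the matching free routine on every exit path. The idiom as a kernel-style sketch (table_alloc/table_free are illustrative helpers, not ehca functions):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>           /* is_vmalloc_addr() */

    /* Prefer the cheap contiguous allocator; fall back for large tables. */
    static void *table_alloc(size_t size)
    {
            void *p = kmalloc(size, GFP_KERNEL);

            return p ? p : vmalloc(size);
    }

    static void table_free(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }

Later kernels wrapped this exact pattern as kvmalloc()/kvfree().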
drivers/infiniband/hw/mlx4/qp.c

@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case IB_WR_LOCAL_INV:
+			ctrl->srcrb_flags |=
+				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
 			set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
 			wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
 			size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
 			break;
 
 		case IB_WR_FAST_REG_MR:
+			ctrl->srcrb_flags |=
+				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
 			set_fmr_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_fmr_seg);
 			size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
drivers/infiniband/hw/mthca/mthca_cmd.c

@@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
 	if (mthca_is_memfree(dev))
 		dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
-					       MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
+					       dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
 	else
 		dev_lim->reserved_mtts = 1 << (field >> 4);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
drivers/infiniband/hw/mthca/mthca_dev.h

@@ -159,6 +159,7 @@ struct mthca_limits {
 	int      reserved_eqs;
 	int      num_mpts;
 	int      num_mtt_segs;
+	int      mtt_seg_size;
 	int      fmr_reserved_mtts;
 	int      reserved_mtts;
 	int      reserved_mrws;
drivers/infiniband/hw/mthca/mthca_eq.c

@@ -641,9 +641,11 @@ static void mthca_free_irqs(struct mthca_dev *dev)
 	if (dev->eq_table.have_irq)
 		free_irq(dev->pdev->irq, dev);
 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
-		if (dev->eq_table.eq[i].have_irq)
+		if (dev->eq_table.eq[i].have_irq) {
 			free_irq(dev->eq_table.eq[i].msi_x_vector,
 				 dev->eq_table.eq + i);
+			dev->eq_table.eq[i].have_irq = 0;
+		}
 }
 
 static int mthca_map_reg(struct mthca_dev *dev,
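This hunk (and the matching one in drivers/net/mlx4/eq.c below) makes IRQ teardown idempotent: clearing have_irq right after free_irq() means a second pass through the cleanup path cannot free the same vector twice. The shape of the fix, as a hedged kernel-style sketch (the struct is illustrative):

    #include <linux/interrupt.h>

    struct eq { int have_irq; unsigned int vector; };

    /* Safe to call any number of times: the flag records that the
     * handler has already been released. */
    static void eq_free_irq(struct eq *eq)
    {
            if (eq->have_irq) {
                    free_irq(eq->vector, eq);
                    eq->have_irq = 0;
            }
    }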
drivers/infiniband/hw/mthca/mthca_main.c

@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
 MODULE_PARM_DESC(fmr_reserved_mtts,
 		 "number of memory translation table segments reserved for FMR");
 
+static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 static char mthca_version[] __devinitdata =
 	DRV_NAME ": Mellanox InfiniBand HCA driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 	int err;
 	u8 status;
 
+	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
 	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
 	if (err) {
 		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 	}
 
 	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
-	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
-					   dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
+					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
 
 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
-							 MTHCA_MTT_SEG_SIZE,
+							 mdev->limits.mtt_seg_size,
 							 mdev->limits.num_mtt_segs,
 							 mdev->limits.reserved_mtts,
 							 1, 0);
@@ -1315,6 +1320,12 @@ static void __init mthca_validate_profile(void)
 		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
 		       hca_profile.fmr_reserved_mtts);
 	}
+
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
+		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
+		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+	}
 }
 
 static int __init mthca_init(void)
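The new log_mtts_per_seg parameter makes the MTT segment size configurable. Each MTT entry is an 8-byte u64, so a segment of 2^log_mtts_per_seg entries occupies (1 << log_mtts_per_seg) * 8 bytes, and the default ilog2(MTHCA_MTT_SEG_SIZE / 8) reproduces the old fixed segment size (64 bytes, assuming the historical MTHCA_MTT_SEG_SIZE). A quick table of the accepted range:

    #include <stdio.h>

    int main(void)
    {
            int log;

            /* log_mtts_per_seg is validated to 1..5; the default of 3 gives
             * the old 64-byte segments (8 entries * 8 bytes each). */
            for (log = 1; log <= 5; ++log)
                    printf("log=%d -> %2d entries/seg, %3d-byte segment\n",
                           log, 1 << log, (1 << log) * 8);
            return 0;
    }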
drivers/infiniband/hw/mthca/mthca_mr.c

@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
 	mtt->buddy = buddy;
 	mtt->order = 0;
-	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
 		++mtt->order;
 
 	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
 	while (list_len > 0) {
 		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
-					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+					   mtt->first_seg * dev->limits.mtt_seg_size +
 					   start_index * 8);
 		mtt_entry[1] = 0;
 		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
 	u64 __iomem *mtts;
 	int i;
 
-	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
 		start_index * sizeof (u64);
 	for (i = 0; i < list_len; ++i)
 		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
 	/* For Arbel, all MTTs must fit in the same page. */
 	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
 	/* Require full segments */
-	BUG_ON(s % MTHCA_MTT_SEG_SIZE);
+	BUG_ON(s % dev->limits.mtt_seg_size);
 
 	mtts = mthca_table_find(dev->mr_table.mtt_table,
-				mtt->first_seg + s / MTHCA_MTT_SEG_SIZE, &dma_handle);
+				mtt->first_seg + s / dev->limits.mtt_seg_size, &dma_handle);
 
 	BUG_ON(!mtts);
@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
 	if (mr->mtt)
 		mpt_entry->mtt_seg =
 			cpu_to_be64(dev->mr_table.mtt_base +
-				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+				    mr->mtt->first_seg * dev->limits.mtt_seg_size);
 
 	if (0) {
 		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 		goto err_out_table;
 	}
 
-	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+	mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
 
 	if (mthca_is_memfree(dev)) {
 		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev)
 			   dev->mr_table.mtt_base);
 
 		dev->mr_table.tavor_fmr.mtt_base =
-			ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
+			ioremap(addr, mtts * dev->limits.mtt_seg_size);
 		if (!dev->mr_table.tavor_fmr.mtt_base) {
 			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
 			err = -ENOMEM;
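The only arithmetic subtlety in this file is the loop in __mthca_alloc_mtt(): it computes the buddy-allocator order by doubling from one segment's worth of entries until it covers size, i.e. order = ceil(log2(size / entries_per_seg)). A standalone worked example with the default 8 entries per segment:

    #include <stdio.h>

    int main(void)
    {
            int size = 50;                  /* MTT entries needed */
            int entries_per_seg = 8;        /* 64-byte segment / 8-byte entries */
            int order = 0, i;

            for (i = entries_per_seg; i < size; i <<= 1)
                    ++order;                /* 8 -> 16 -> 32 -> 64 */

            printf("order=%d -> %d segments\n", order, 1 << order);
            /* prints "order=3 -> 8 segments" (64 entries >= 50) */
            return 0;
    }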
drivers/infiniband/hw/mthca/mthca_profile.c

@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
 	profile[MTHCA_RES_RDB].size  = MTHCA_RDB_ENTRY_SIZE;
 	profile[MTHCA_RES_MCG].size  = MTHCA_MGM_ENTRY_SIZE;
 	profile[MTHCA_RES_MPT].size  = dev_lim->mpt_entry_sz;
-	profile[MTHCA_RES_MTT].size  = MTHCA_MTT_SEG_SIZE;
+	profile[MTHCA_RES_MTT].size  = dev->limits.mtt_seg_size;
 	profile[MTHCA_RES_UAR].size  = dev_lim->uar_scratch_entry_sz;
 	profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
 	profile[MTHCA_RES_UARC].size = request->uarc_size;
@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
 			dev->limits.num_mtt_segs = profile[i].num;
 			dev->mr_table.mtt_base   = profile[i].start;
 			init_hca->mtt_base       = profile[i].start;
-			init_hca->mtt_seg_sz     = ffs(MTHCA_MTT_SEG_SIZE) - 7;
+			init_hca->mtt_seg_sz     = ffs(dev->limits.mtt_seg_size) - 7;
 			break;
 		case MTHCA_RES_UAR:
 			dev->limits.num_uars       = profile[i].num;
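The ffs() expression encodes the power-of-two segment size for the INIT_HCA firmware field: for a power of two, ffs(sz) - 1 == log2(sz), so ffs(sz) - 7 == log2(sz / 64). Presumably the field counts segment size in units of 64 bytes, which is at least consistent with the old constant yielding 0. A standalone check of the encoding:

    #include <stdio.h>
    #include <strings.h>            /* ffs() */

    int main(void)
    {
            int sz;

            for (sz = 64; sz <= 256; sz <<= 1)
                    printf("seg_size=%3d -> mtt_seg_sz field=%d\n",
                           sz, ffs(sz) - 7);
            /* 64 -> 0, 128 -> 1, 256 -> 2 */
            return 0;
    }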
drivers/infiniband/hw/nes/nes_hw.c

@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
 		return 0;
 	}
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
 		       nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
 		return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	i = 0;
 	while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
 		mdelay(1);
-	if (i >= 10000) {
+	if (i > 10000) {
 		nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
 		return 0;
 	}
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
 			& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 		mdelay(1);
-	if (i >= 5000) {
+	if (i > 5000) {
 		nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
 		return 0;
 	}
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 	while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
 			& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 		mdelay(1);
-	if (i >= 5000) {
+	if (i > 5000) {
 		nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
 		return 0;
 	}
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
 		while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
 				& 0x0000000f)) != 0x0000000f) && i++ < 5000)
 			mdelay(1);
-		if (i >= 5000) {
+		if (i > 5000) {
 			nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
 			return 1;
 		}
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
 		while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
 				& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
 			mdelay(1);
-		if (i >= 5000) {
+		if (i > 5000) {
 			printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
 			/* return 1; */
 		}
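All seven nes hunks fix the same off-by-one in the polling-timeout tests. With while (cond && i++ < LIMIT), a genuine timeout exits with i == LIMIT + 1, but a success on the very last allowed poll exits with i == LIMIT, so the old i >= LIMIT misreported that last-moment success as a timeout. A standalone demonstration of the boundary case:

    #include <stdio.h>

    int main(void)
    {
            const int LIMIT = 10000;
            int ready_at = 10000;   /* device becomes ready on the last poll */
            int i = 0;

            while ((i != ready_at) && i++ < LIMIT)
                    ;               /* stand-in for mdelay(1) + register read */

            printf("i=%d old_test(>=)=%d new_test(>)=%d\n",
                   i, i >= LIMIT, i > LIMIT);
            /* prints i=10000: the old test flags a timeout even though the
             * device is ready; the new test correctly reports success. */
            return 0;
    }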
drivers/net/mlx4/eq.c

@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-		if (eq_table->eq[i].have_irq)
+		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+			eq_table->eq[i].have_irq = 0;
+		}
 
 	kfree(eq_table->irq_names);
 }
drivers/net/mlx4/main.c

@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		  "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
 	dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
 	dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
+	dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
 	dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-						    MLX4_MTT_ENTRY_PER_SEG);
+						    dev->caps.mtts_per_seg);
 	dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
 	dev->caps.reserved_uars      = dev_cap->reserved_uars;
 	dev->caps.reserved_pds       = dev_cap->reserved_pds;
-	dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags              = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
 		return -1;
 	}
 
+	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+		return -1;
+	}
+
 	return 0;
 }
drivers/net/mlx4/mr.c

@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 	} else
 		mtt->page_shift = page_shift;
 
-	for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
 		++mtt->order;
 
 	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
 						   MLX4_MPT_PD_FLAG_RAE);
 		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-						   MLX4_MTT_ENTRY_PER_SEG);
+						   dev->caps.mtts_per_seg);
 	} else {
 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
 	}
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
 		return -EINVAL;
 
-	if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+	if (start_index & (dev->caps.mtts_per_seg - 1))
 		return -EINVAL;
 
 	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
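The start_index check uses the standard power-of-two alignment idiom: when n is a power of two, x & (n - 1) equals x % n, which is exactly why log_mtts_per_seg is taken as a log2 value and mtts_per_seg is therefore always a power of two. A small illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mtts_per_seg = 1u << 3;    /* 8 entries per segment */
            unsigned int idx;

            for (idx = 14; idx <= 17; ++idx)
                    printf("start_index=%u %s\n", idx,
                           (idx & (mtts_per_seg - 1)) ? "rejected" : "aligned");
            /* only 16 is aligned to a segment boundary */
            return 0;
    }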
drivers/net/mlx4/profile.c

@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
 	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
 	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-	profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+	profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
 
 	profile[MLX4_RES_QP].num      = request->num_qp;
include/linux/mlx4/device.h

@@ -210,6 +210,7 @@ struct mlx4_caps {
 	int	num_comp_vectors;
 	int	num_mpts;
 	int	num_mtt_segs;
+	int	mtts_per_seg;
 	int	fmr_reserved_mtts;
 	int	reserved_mtts;
 	int	reserved_mrws;
include/linux/mlx4/qp.h

@@ -165,6 +165,7 @@ enum {
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
 	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 };
 
 struct mlx4_wqe_ctrl_seg {