Commit 17fb4f29 authored by Jubin John, committed by Doug Ledford

staging/rdma/hfi1: Fix code alignment

Fix code alignment to fix checkpatch check:
CHECK: Alignment should match open parenthesis
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4d114fdd
......@@ -1147,11 +1147,8 @@ struct cntr_entry {
/*
* accessor for stat element, context either dd or ppd
*/
u64 (*rw_cntr)(const struct cntr_entry *,
void *context,
int vl,
int mode,
u64 data);
u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
......@@ -1501,7 +1498,8 @@ static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
void *context, int vl, int mode,
u64 data)
{
struct hfi1_pportdata *ppd = context;
......@@ -5214,9 +5212,7 @@ static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
if (source < ARRAY_SIZE(cce_misc_names))
strncpy(buf, cce_misc_names[source], bsize);
else
snprintf(buf,
bsize,
"Reserved%u",
snprintf(buf, bsize, "Reserved%u",
source + IS_GENERAL_ERR_START);
return buf;
......@@ -5341,13 +5337,15 @@ static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
cce_err_status_flags,
ARRAY_SIZE(cce_err_status_flags));
}
static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
rxe_err_status_flags,
ARRAY_SIZE(rxe_err_status_flags));
}
static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
......@@ -5359,7 +5357,8 @@ static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
pio_err_status_flags,
ARRAY_SIZE(pio_err_status_flags));
}
static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
......@@ -5372,13 +5371,15 @@ static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
egress_err_status_flags,
ARRAY_SIZE(egress_err_status_flags));
}
static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
egress_err_info_flags,
ARRAY_SIZE(egress_err_info_flags));
}
static char *send_err_status_string(char *buf, int buf_len, u64 flags)
......@@ -5430,8 +5431,8 @@ static void update_rcverr_timer(unsigned long opaque)
if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
set_link_down_reason(ppd,
OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
set_link_down_reason(
ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
}
......@@ -5790,7 +5791,8 @@ static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
sc_err_status_flags,
ARRAY_SIZE(sc_err_status_flags));
}
/*
......@@ -5833,7 +5835,8 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
send_context_err_status_string(flags, sizeof(flags), status));
send_context_err_status_string(flags, sizeof(flags),
status));
if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
......@@ -6010,8 +6013,8 @@ static int request_host_lcb_access(struct hfi1_devdata *dd)
int ret;
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
NULL);
(u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
......@@ -6024,8 +6027,8 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd)
int ret;
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
NULL);
(u64)HCMD_MISC_GRANT_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
......@@ -6040,8 +6043,8 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd)
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
| DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}
/*
......@@ -6174,9 +6177,10 @@ static void init_lcb_access(struct hfi1_devdata *dd)
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
| (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
| (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
(u64)return_code <<
DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
(u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
/*
......@@ -6272,11 +6276,11 @@ static void write_global_credit(struct hfi1_devdata *dd,
u8 vau, u16 total, u16 shared)
{
write_csr(dd, SEND_CM_GLOBAL_CREDIT,
((u64)total
<< SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
| ((u64)shared
<< SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
| ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
((u64)total <<
SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
((u64)shared <<
SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
}
/*
......@@ -6359,10 +6363,9 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
reg = read_csr(dd, DCC_CFG_RESET);
write_csr(dd, DCC_CFG_RESET,
reg
| (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
| (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
write_csr(dd, DCC_CFG_RESET, reg |
(1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
(1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */
......@@ -6665,8 +6668,7 @@ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
if (time_after(jiffies, timeout)) {
dd_dev_err(dd,
"Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
freeze ? "" : "un",
reg & ALL_FROZE,
freeze ? "" : "un", reg & ALL_FROZE,
freeze ? ALL_FROZE : 0ull);
return;
}
......@@ -7166,12 +7168,7 @@ void handle_verify_cap(struct work_struct *work)
*/
read_vc_remote_phy(dd, &power_management, &continious);
read_vc_remote_fabric(
dd,
&vau,
&z,
&vcu,
&vl15buf,
read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
&partner_supported_crc);
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
read_remote_device_id(dd, &device_id, &device_rev);
......@@ -7187,10 +7184,7 @@ void handle_verify_cap(struct work_struct *work)
(int)power_management, (int)continious);
dd_dev_info(dd,
"Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
(int)vau,
(int)z,
(int)vcu,
(int)vl15buf,
(int)vau, (int)z, (int)vcu, (int)vl15buf,
(int)partner_supported_crc);
dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
(u32)remote_tx_rate, (u32)link_widths);
......@@ -7369,8 +7363,10 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
/* downgrade is disabled */
/* bounce if not at starting active width */
if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) ||
(ppd->link_width_active != ppd->link_width_downgrade_rx_active)) {
if ((ppd->link_width_active !=
ppd->link_width_downgrade_tx_active) ||
(ppd->link_width_active !=
ppd->link_width_downgrade_rx_active)) {
dd_dev_err(ppd->dd,
"Link downgrade is disabled and link has downgraded, downing link\n");
dd_dev_err(ppd->dd,
......@@ -7387,8 +7383,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
"Link is outside of downgrade allowed, downing link\n");
dd_dev_err(ppd->dd,
" enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
lwde,
ppd->link_width_downgrade_tx_active,
lwde, ppd->link_width_downgrade_tx_active,
ppd->link_width_downgrade_rx_active);
do_bounce = 1;
}
......@@ -7483,7 +7478,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd_dev_info(dd, "Link error: %s\n",
dc8051_info_err_string(buf,
sizeof(buf),
err & FAILED_LNI));
err &
FAILED_LNI));
}
err &= ~(u64)FAILED_LNI;
}
......@@ -7495,7 +7491,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (err) {
/* report remaining errors, but do not do anything */
dd_dev_err(dd, "8051 info error: %s\n",
dc8051_info_err_string(buf, sizeof(buf), err));
dc8051_info_err_string(buf, sizeof(buf),
err));
}
/*
......@@ -7548,7 +7545,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (host_msg) {
/* report remaining messages, but do not do anything */
dd_dev_info(dd, "8051 info host message: %s\n",
dc8051_info_host_msg_string(buf, sizeof(buf),
dc8051_info_host_msg_string(buf,
sizeof(buf),
host_msg));
}
......@@ -7562,8 +7560,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
*/
dd_dev_err(dd, "Lost 8051 heartbeat\n");
write_csr(dd, DC_DC8051_ERR_EN,
read_csr(dd, DC_DC8051_ERR_EN)
& ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
read_csr(dd, DC_DC8051_ERR_EN) &
~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
}
......@@ -8693,8 +8691,7 @@ static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
for (lane = 0; lane < 4; lane++) {
ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
if (ret) {
dd_dev_err(
dd,
dd_dev_err(dd,
"Unable to read lane %d firmware details\n",
lane);
continue;
......@@ -8718,8 +8715,7 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
int ret;
ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
type, data_out);
ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "read idle message: type %d, err %d\n",
(u32)type, ret);
......@@ -8739,8 +8735,8 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
*/
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
return read_idle_message(dd,
(u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
data);
}
/*
......@@ -8771,8 +8767,8 @@ int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
u64 data;
data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
| ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
return send_idle_message(dd, data);
}
......@@ -8811,8 +8807,7 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
timeout = jiffies + msecs_to_jiffies(10);
while (1) {
reg = read_csr(dd,
DC_LCB_STS_LINK_TRANSFER_ACTIVE);
reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
if (reg)
break;
if (time_after(jiffies, timeout)) {
......@@ -8838,8 +8833,7 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
dd_dev_err(dd,
"Pausing for peer to be finished with LCB set up\n");
msleep(5000);
dd_dev_err(dd,
"Continuing with quick linkup\n");
dd_dev_err(dd, "Continuing with quick linkup\n");
}
write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
......@@ -9010,7 +9004,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
/*
* DC supports continuous updates.
*/
ret = write_vc_local_phy(dd, 0 /* no power management */,
ret = write_vc_local_phy(dd,
0 /* no power management */,
1 /* continuous updates */);
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
......@@ -9022,7 +9017,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
goto set_local_link_attributes_fail;
ret = write_vc_local_link_width(dd, 0, 0,
opa_to_vc_link_widths(ppd->link_width_enabled));
opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
......@@ -9121,11 +9117,10 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
mask = (u64)QSFP_HFI0_RESET_N;
qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
qsfp_mask |= mask;
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
qsfp_mask = read_csr(dd, dd->hfi1_id ?
ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
qsfp_mask = read_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
qsfp_mask &= ~mask;
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
......@@ -9152,100 +9147,84 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
(qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
dd_dev_info(dd,
"%s: QSFP cable on fire\n",
dd_dev_info(dd, "%s: QSFP cable on fire\n",
__func__);
if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
(qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
dd_dev_info(dd,
"%s: QSFP cable temperature too low\n",
dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
__func__);
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
dd_dev_info(dd,
"%s: QSFP supply voltage too high\n",
dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
__func__);
if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
dd_dev_info(dd,
"%s: QSFP supply voltage too low\n",
dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
__func__);
/* Byte 2 is vendor specific */
if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable RX channel 1/2 power too high\n",
dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
__func__);
if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable RX channel 1/2 power too low\n",
dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
__func__);
if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable RX channel 3/4 power too high\n",
dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
__func__);
if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable RX channel 3/4 power too low\n",
dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
__func__);
if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
(qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 1/2 bias too high\n",
dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
__func__);
if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
(qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 1/2 bias too low\n",
dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
__func__);
if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
(qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 3/4 bias too high\n",
dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
__func__);
if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
(qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 3/4 bias too low\n",
dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
__func__);
if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 1/2 power too high\n",
dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
__func__);
if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 1/2 power too low\n",
dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
__func__);
if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 3/4 power too high\n",
dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
__func__);
if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
dd_dev_info(dd,
"%s: Cable TX channel 3/4 power too low\n",
dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
__func__);
/* Bytes 9-10 and 11-12 are reserved */
......@@ -9702,11 +9681,14 @@ static void set_send_length(struct hfi1_pportdata *ppd)
/* all kernel receive contexts have the same hdrqentsize */
for (i = 0; i < ppd->vls_supported; i++) {
sc_set_cr_threshold(dd->vld[i].sc,
sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
dd->rcd[0]->rcvhdrqentsize));
sc_mtu_to_threshold(dd->vld[i].sc,
dd->vld[i].mtu,
dd->rcd[0]->
rcvhdrqentsize));
}
sc_set_cr_threshold(dd->vld[15].sc,
sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
sc_mtu_to_threshold(dd->vld[15].sc,
dd->vld[15].mtu,
dd->rcd[0]->rcvhdrqentsize));
/* Adjust maximum MTU for the port in DC */
......@@ -10071,7 +10053,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
* simulator jumps from polling to link up.
* Accept that here.
*/
/* OK */;
/* OK */
} else if (ppd->host_link_state != HLS_GOING_UP) {
goto unexpected;
}
......@@ -10845,7 +10827,8 @@ int set_buffer_control(struct hfi1_pportdata *ppd,
if (lowering_dedicated[i]) {
set_vl_dedicated(dd, i,
be16_to_cpu(new_bc->vl[i].dedicated));
be16_to_cpu(new_bc->
vl[i].dedicated));
cur_bc.vl[i].dedicated =
new_bc->vl[i].dedicated;
}
......@@ -10861,7 +10844,8 @@ int set_buffer_control(struct hfi1_pportdata *ppd,
if (be16_to_cpu(new_bc->vl[i].dedicated) >
be16_to_cpu(cur_bc.vl[i].dedicated))
set_vl_dedicated(dd, i,
be16_to_cpu(new_bc->vl[i].dedicated));
be16_to_cpu(new_bc->
vl[i].dedicated));
}
}
......@@ -11152,7 +11136,8 @@ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
* been verified to be in range
*/
write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
(u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
(u64)timeout <<
RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
......@@ -11863,8 +11848,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
dev_cntrs[i].offset = dd->ndevcntrs;
for (j = 0; j < C_VL_COUNT; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
dev_cntrs[i].name,
vl_from_idx(j));
dev_cntrs[i].name, vl_from_idx(j));
sz += strlen(name);
/* Add ",32" for 32-bit counters */
if (dev_cntrs[i].flags & CNTR_32BIT)
......@@ -11987,8 +11971,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
port_cntrs[i].offset = dd->nportcntrs;
for (j = 0; j < C_VL_COUNT; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
port_cntrs[i].name,
vl_from_idx(j));
port_cntrs[i].name, vl_from_idx(j));
sz += strlen(name);
/* Add ",32" for 32-bit counters */
if (port_cntrs[i].flags & CNTR_32BIT)
......@@ -12021,8 +12004,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
if (port_cntrs[i].flags & CNTR_VL) {
for (j = 0; j < C_VL_COUNT; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
port_cntrs[i].name,
vl_from_idx(j));
port_cntrs[i].name, vl_from_idx(j));
memcpy(p, name, strlen(name));
p += strlen(name);
......@@ -13120,8 +13102,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
write_csr(dd, SEND_CM_GLOBAL_CREDIT,
SEND_CM_GLOBAL_CREDIT_RESETCSR);
write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
/* SEND_CM_CREDIT_USED_STATUS read-only */
write_csr(dd, SEND_CM_TIMER_CTRL, 0);
write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
......@@ -13318,8 +13299,8 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
/* RCV_EGR_OFFSET_TAIL read-only */
for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
0);
write_uctxt_csr(dd, i,
RCV_TID_FLOW_TABLE + (8 * j), 0);
}
}
}
......@@ -13519,12 +13500,12 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
kdeth_qp = DEFAULT_KDETH_QP;
write_csr(dd, SEND_BTH_QP,
(kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
<< SEND_BTH_QP_KDETH_QP_SHIFT);
(kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
SEND_BTH_QP_KDETH_QP_SHIFT);
write_csr(dd, RCV_BTH_QP,
(kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
<< RCV_BTH_QP_KDETH_QP_SHIFT);
(kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
RCV_BTH_QP_KDETH_QP_SHIFT);
}
/**
......@@ -13649,12 +13630,11 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
/* add rule0 */
write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
<< RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
LRH_BTH_MATCH_OFFSET
<< RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
......@@ -13682,9 +13662,8 @@ static void init_rxe(struct hfi1_devdata *dd)
/* enable all receive errors */
write_csr(dd, RCV_ERR_MASK, ~0ull);
/* setup QPN map table - start where VL15 context leaves off */
init_qos(
dd,
dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
MIN_KERNEL_KCTXTS : 0);
/*
* make sure RcvCtrl.RcvWcb <= PCIe Device Control
* Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
......@@ -13721,22 +13700,20 @@ static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
u32 csr0to3, u32 csr4to7)
{
write_csr(dd, csr0to3,
0ull <<
SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
| 1ull <<
SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
| 2ull * cu <<
SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
| 4ull * cu <<
0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
2ull * cu <<
SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
4ull * cu <<
SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
write_csr(dd, csr4to7,
8ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
| 16ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
| 32ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
| 64ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
16ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
32ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
64ull * cu <<
SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
......@@ -13995,8 +13972,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
dd = hfi1_alloc_devdata(pdev,
NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
sizeof(struct hfi1_pportdata));
if (IS_ERR(dd))
goto bail;
ppd = dd->pport;
......@@ -14083,8 +14060,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
(int)dd->irev);
dd->icode < ARRAY_SIZE(inames) ?
inames[dd->icode] : "unknown", (int)dd->irev);
/* speeds the hardware can support */
dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
......
......@@ -701,7 +701,8 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
*/
prefetch_range(packet->ebuf,
packet->tlen - ((packet->rcd->rcvhdrqentsize -
(rhf_hdrq_offset(packet->rhf) + 2)) * 4));
(rhf_hdrq_offset(packet->rhf)
+ 2)) * 4));
}
/*
......@@ -958,9 +959,9 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
DROP_PACKET_OFF) == DROP_PACKET_ON)) {
if (unlikely(dd->do_drop &&
atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
DROP_PACKET_ON)) {
dd->do_drop = 0;
/* On to the next packet */
......@@ -990,8 +991,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
if (seq != rcd->seq_cnt)
last = RCV_PKT_DONE;
if (needset) {
dd_dev_info(dd,
"Switching to NO_DMA_RTAIL\n");
dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
set_all_nodma_rtail(dd);
needset = 0;
}
......
......@@ -115,11 +115,9 @@ static DEFINE_MUTEX(eprom_mutex);
static void write_enable(struct hfi1_devdata *dd)
{
/* raise signal */
write_csr(dd, ASIC_GPIO_OUT,
read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
/* raise enable */
write_csr(dd, ASIC_GPIO_OE,
read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
}
/*
......@@ -128,11 +126,9 @@ static void write_enable(struct hfi1_devdata *dd)
static void write_disable(struct hfi1_devdata *dd)
{
/* lower signal */
write_csr(dd, ASIC_GPIO_OUT,
read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
/* lower enable */
write_csr(dd, ASIC_GPIO_OE,
read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
}
/*
......@@ -472,8 +468,7 @@ int eprom_init(struct hfi1_devdata *dd)
/* reset EPROM to be sure it is in a good state */
/* set reset */
write_csr(dd, ASIC_EEP_CTL_STAT,
ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
/* clear reset, set speed */
write_csr(dd, ASIC_EEP_CTL_STAT,
EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
......
......@@ -1145,8 +1145,8 @@ static int user_init(struct file *fp)
* has done it.
*/
if (fd->subctxt) {
ret = wait_event_interruptible(uctxt->wait,
!test_bit(HFI1_CTXT_MASTER_UNINIT,
ret = wait_event_interruptible(uctxt->wait, !test_bit(
HFI1_CTXT_MASTER_UNINIT,
&uctxt->event_flags));
goto expected;
}
......
......@@ -393,13 +393,19 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what,
static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
{
/* verify CSS header fields (most sizes are in DW, so add /4) */
if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) ||
invalid_header(dd, "header_len", css->header_len, (sizeof(struct firmware_file) / 4)) ||
invalid_header(dd, "header_version", css->header_version, CSS_HEADER_VERSION) ||
invalid_header(dd, "module_vendor", css->module_vendor, CSS_MODULE_VENDOR) ||
if (invalid_header(dd, "module_type", css->module_type,
CSS_MODULE_TYPE) ||
invalid_header(dd, "header_len", css->header_len,
(sizeof(struct firmware_file) / 4)) ||
invalid_header(dd, "header_version", css->header_version,
CSS_HEADER_VERSION) ||
invalid_header(dd, "module_vendor", css->module_vendor,
CSS_MODULE_VENDOR) ||
invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
invalid_header(dd, "modulus_size", css->modulus_size, KEY_SIZE / 4) ||
invalid_header(dd, "exponent_size", css->exponent_size, EXPONENT_SIZE / 4)) {
invalid_header(dd, "modulus_size", css->modulus_size,
KEY_SIZE / 4) ||
invalid_header(dd, "exponent_size", css->exponent_size,
EXPONENT_SIZE / 4)) {
return -EINVAL;
}
return 0;
......@@ -532,7 +538,8 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
/* css->size check failed */
dd_dev_err(dd,
"invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
fdet->fw->size / 4, (fdet->fw->size - AUGMENT_SIZE) / 4,
fdet->fw->size / 4,
(fdet->fw->size - AUGMENT_SIZE) / 4,
css->size);
ret = -EINVAL;
......@@ -908,8 +915,8 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who,
* is not keeping the error high.
*/
write_csr(dd, MISC_ERR_CLEAR,
MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK
| MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
/*
* All that is left are the current errors. Print warnings on
* authorization failure details, if any. Firmware authorization
......@@ -938,7 +945,8 @@ static void load_security_variables(struct hfi1_devdata *dd,
write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
/* Security variables d. Write the header */
write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
(u8 *)fdet->css_header, sizeof(struct css_header));
(u8 *)fdet->css_header,
sizeof(struct css_header));
}
/* return the 8051 firmware state */
......@@ -1066,11 +1074,11 @@ void sbus_request(struct hfi1_devdata *dd,
u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
write_csr(dd, ASIC_CFG_SBUS_REQUEST,
((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT)
| ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT)
| ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT)
| ((u64)receiver_addr
<< ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
((u64)receiver_addr <<
ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
}
/*
......@@ -1551,8 +1559,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
header2 = *(ptr + 1);
if (header1 != ~header2) {
dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
__func__, (ptr -
(u32 *)dd->platform_config.data));
__func__, (ptr - (u32 *)
dd->platform_config.data));
goto bail;
}
......@@ -1597,7 +1605,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
dd_dev_info(dd,
"%s: Unknown data table %d, offset %ld\n",
__func__, table_type,
(ptr - (u32 *)dd->platform_config.data));
(ptr - (u32 *)
dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table = ptr;
......@@ -1654,7 +1663,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
}
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
int field, u32 *field_len_bits, u32 *field_start_bits)
int field, u32 *field_len_bits,
u32 *field_start_bits)
{
struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
u32 *src_ptr = NULL;
......@@ -1714,8 +1724,9 @@ static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
* @len: length of memory pointed by @data in bytes.
*/
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding table_type,
int table_index, int field_index, u32 *data, u32 len)
enum platform_config_table_type_encoding
table_type, int table_index, int field_index,
u32 *data, u32 len)
{
int ret = 0, wlen = 0, seek = 0;
u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
......@@ -1727,7 +1738,8 @@ int get_platform_config_field(struct hfi1_devdata *dd,
return -EINVAL;
ret = get_platform_fw_field_metadata(dd, table_type, field_index,
&field_len_bits, &field_start_bits);
&field_len_bits,
&field_start_bits);
if (ret)
return -EINVAL;
......
......@@ -1715,8 +1715,9 @@ void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding table_type,
int table_index, int field_index, u32 *data, u32 len);
enum platform_config_table_type_encoding
table_type, int table_index, int field_index,
u32 *data, u32 len);
const char *get_unit_name(int unit);
const char *get_card_name(struct rvt_dev_info *rdi);
......
......@@ -135,16 +135,14 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
set_up_vl15(dd, dd->vau, dd->vl15_init);
assign_remote_cm_au_table(dd, dd->vcu);
ppd->neighbor_guid =
read_csr(dd,
DC_DC8051_STS_REMOTE_GUID);
read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
ppd->neighbor_type =
read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
ppd->neighbor_port_number =
read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
dd_dev_info(dd,
"Neighbor GUID: %llx Neighbor type %d\n",
dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n",
ppd->neighbor_guid,
ppd->neighbor_type);
}
......
......@@ -981,9 +981,8 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
link_state = HLS_DN_DOWNDEF;
else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
link_state = HLS_DN_POLL;
set_link_down_reason(ppd,
OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
OPA_LINKDOWN_REASON_FM_BOUNCE);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
0, OPA_LINKDOWN_REASON_FM_BOUNCE);
} else if (phys_state == IB_PORTPHYSSTATE_DISABLED)
link_state = HLS_DN_DISABLE;
else {
......@@ -1185,7 +1184,9 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
if (lwe == OPA_LINK_WIDTH_RESET ||
lwe == OPA_LINK_WIDTH_RESET_OLD) {
set_link_width_downgrade_enabled(ppd,
ppd->link_width_downgrade_supported);
ppd->
link_width_downgrade_supported
);
} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
/* only set and apply if something changed */
if (lwe != ppd->link_width_downgrade_enabled) {
......@@ -1216,10 +1217,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
}
for (i = 0; i < ppd->vls_supported; i++) {
if ((i % 2) == 0)
mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4)
& 0xF);
mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
4) & 0xF);
else
mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF);
mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
0xF);
if (mtu == 0xffff) {
pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
mtu,
......@@ -2320,7 +2322,8 @@ static void a0_portstatus(struct hfi1_pportdata *ppd,
}
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
struct opa_port_status_req *req =
(struct opa_port_status_req *)pmp->data;
......@@ -2563,7 +2566,8 @@ static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
}
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
struct opa_port_data_counters_msg *req =
(struct opa_port_data_counters_msg *)pmp->data;
......@@ -2777,7 +2781,8 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
}
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ectrs *rsp;
......@@ -2950,7 +2955,8 @@ static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
}
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ei *rsp;
......@@ -3047,7 +3053,8 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
}
static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
struct opa_clear_port_status *req =
(struct opa_clear_port_status *)pmp->data;
......@@ -3194,7 +3201,8 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
}
static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev, u8 port, u32 *resp_len)
struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
struct _port_ei *rsp;
struct opa_port_error_info_msg *req;
......@@ -3295,8 +3303,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
u8 *data,
struct ib_device *ibdev,
u8 *data, struct ib_device *ibdev,
u8 port, u32 *resp_len)
{
int i;
......
......@@ -219,8 +219,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
&dd->pcie_devctl2);
pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
&dd->pci_lnkctl3);
pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3);
pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
return 0;
......@@ -432,19 +431,15 @@ void hfi1_enable_intx(struct pci_dev *pdev)
void restore_pci_variables(struct hfi1_devdata *dd)
{
pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
pci_write_config_dword(dd->pcidev,
PCI_BASE_ADDRESS_0, dd->pcibar0);
pci_write_config_dword(dd->pcidev,
PCI_BASE_ADDRESS_1, dd->pcibar1);
pci_write_config_dword(dd->pcidev,
PCI_ROM_ADDRESS, dd->pci_rom);
pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);
pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
dd->pcie_devctl2);
pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
dd->pci_lnkctl3);
pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3);
pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
}
......@@ -758,7 +753,8 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
dd_dev_err(dd, " prec attn post\n");
}
dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
i, (u32)eq[i][0], (u32)eq[i][1], (u32)eq[i][2]);
i, (u32)eq[i][0], (u32)eq[i][1],
(u32)eq[i][2]);
dd_dev_err(dd, " %02x %02x %02x\n",
(u32)c_minus1, (u32)c0, (u32)c_plus1);
hit_error = 1;
......@@ -840,8 +836,8 @@ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
u16 code, u16 data)
{
write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
(((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT)
| ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
(((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
}
/*
......@@ -851,14 +847,13 @@ static void arm_gasket_logic(struct hfi1_devdata *dd)
{
u64 reg;
reg = (((u64)1 << dd->hfi1_id)
<< ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT)
| ((u64)pcie_serdes_broadcast[dd->hfi1_id]
<< ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT
| ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK
| ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK)
<< ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT
);
reg = (((u64)1 << dd->hfi1_id) <<
ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
/* read back to push the write */
read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
......@@ -1087,8 +1082,10 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
default_pset = DEFAULT_MCP_PSET;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
(fs << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT)
| (lf << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
(fs <<
PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
(lf <<
PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
ret = load_eq_table(dd, eq, fs, div);
if (ret)
goto done;
......@@ -1107,10 +1104,10 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
}
dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset);
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
((1 << pcie_pset)
<< PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT)
| PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK
| PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
((1 << pcie_pset) <<
PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
/*
* step 5b: Do post firmware download steps via SBus
......
......@@ -766,8 +766,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
/* set the default partition key */
write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
(DEFAULT_PKEY &
SC(CHECK_PARTITION_KEY_VALUE_MASK))
<< SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
SC(CHECK_PARTITION_KEY_VALUE_MASK)) <<
SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
/* per context type checks */
if (type == SC_USER) {
......@@ -1237,8 +1237,7 @@ int sc_enable(struct send_context *sc)
*/
reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
if (reg)
write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR),
reg);
write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
/*
* The HW PIO initialization engine can handle only one init
......@@ -2034,8 +2033,8 @@ void free_credit_return(struct hfi1_devdata *dd)
for (i = 0; i < num_numa; i++) {
if (dd->cr_base[i].va) {
dma_free_coherent(&dd->pcidev->dev,
TXE_NUM_CONTEXTS
* sizeof(struct credit_return),
TXE_NUM_CONTEXTS *
sizeof(struct credit_return),
dd->cr_base[i].va,
dd->cr_base[i].pa);
}
......
......@@ -1773,8 +1773,8 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
* schedule a response to be sent.
*/
static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
struct rvt_qp *qp, u32 opcode, u32 psn, int diff,
struct hfi1_ctxtdata *rcd)
struct rvt_qp *qp, u32 opcode, u32 psn,
int diff, struct hfi1_ctxtdata *rcd)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct rvt_ack_entry *e;
......
......@@ -283,7 +283,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
if (!gid_ok(
&hdr->u.l.grh.sgid,
qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
qp->alt_ah_attr.grh.dgid.global.interface_id))
goto err;
......@@ -317,7 +318,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
if (!gid_ok(
&hdr->u.l.grh.sgid,
qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
qp->remote_ah_attr.grh.dgid.global.interface_id))
goto err;
......
......@@ -1622,12 +1622,10 @@ static void sdma_setlengen(struct sdma_engine *sde)
* generation counter.
*/
write_sde_csr(sde, SD(LEN_GEN),
(sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)
);
(sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
write_sde_csr(sde, SD(LEN_GEN),
((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT))
| (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
);
((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
(4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}
static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
......@@ -1707,8 +1705,7 @@ static void init_sdma_regs(
write_sde_csr(sde, SD(DESC_CNT), 0);
write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
write_sde_csr(sde, SD(MEMORY),
((u64)credits <<
SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
((u64)(credits * sde->this_idx) <<
SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
......@@ -1797,10 +1794,7 @@ static void dump_sdma_state(struct sdma_engine *sde)
dd_dev_err(sde->dd,
"SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
sde->this_idx,
head,
tail,
cnt,
sde->this_idx, head, tail, cnt,
!list_empty(&sde->flushlist));
/* print info for each entry in the descriptor queue */
......@@ -1830,12 +1824,15 @@ static void dump_sdma_state(struct sdma_engine *sde)
if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
dd_dev_err(sde->dd,
"\taidx: %u amode: %u alen: %u\n",
(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
>> SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
>> SDMA_DESC1_HEADER_MODE_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
>> SDMA_DESC1_HEADER_DWS_SHIFT));
(u8)((desc[1] &
SDMA_DESC1_HEADER_INDEX_SMASK) >>
SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] &
SDMA_DESC1_HEADER_MODE_SMASK) >>
SDMA_DESC1_HEADER_MODE_SHIFT),
(u8)((desc[1] &
SDMA_DESC1_HEADER_DWS_SMASK) >>
SDMA_DESC1_HEADER_DWS_SHIFT));
head++;
head &= sde->sdma_mask;
}
......@@ -1866,12 +1863,9 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
sdma_state_name(sde->state.current_state),
(unsigned long long)read_sde_csr(sde, SD(CTRL)),
(unsigned long long)read_sde_csr(sde, SD(STATUS)),
(unsigned long long)read_sde_csr(sde,
SD(ENG_ERR_STATUS)),
(unsigned long long)read_sde_csr(sde, SD(TAIL)),
tail,
(unsigned long long)read_sde_csr(sde, SD(HEAD)),
head,
(unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
(unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
(unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
(unsigned long long)le64_to_cpu(*sde->head_dma),
(unsigned long long)read_sde_csr(sde, SD(MEMORY)),
(unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
......@@ -1909,10 +1903,12 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
head, flags, addr, gen, len);
if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
>> SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
>> SDMA_DESC1_HEADER_MODE_SHIFT));
(u8)((desc[1] &
SDMA_DESC1_HEADER_INDEX_SMASK) >>
SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] &
SDMA_DESC1_HEADER_MODE_SMASK) >>
SDMA_DESC1_HEADER_MODE_SHIFT));
head = (head + 1) & sde->sdma_mask;
}
}
......@@ -2108,8 +2104,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
* -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
int sdma_send_txlist(struct sdma_engine *sde,
struct iowait *wait,
int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
struct list_head *tx_list)
{
struct sdma_txreq *tx, *tx_next;
......@@ -2178,8 +2173,7 @@ int sdma_send_txlist(struct sdma_engine *sde,
goto update_tail;
}
static void sdma_process_event(struct sdma_engine *sde,
enum sdma_events event)
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
unsigned long flags;
......@@ -3003,7 +2997,8 @@ void sdma_freeze(struct hfi1_devdata *dd)
* continuing.
*/
ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
atomic_read(&dd->sdma_unfreeze_count) <= 0);
atomic_read(&dd->sdma_unfreeze_count) <=
0);
/* interrupted or count is negative, then unloading - just exit */
if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
return;
......@@ -3054,5 +3049,6 @@ void _sdma_engine_progress_schedule(
trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
/* assume we have selected a good cpu */
write_csr(sde->dd,
CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask);
CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
sde->progress_mask);
}
......@@ -707,8 +707,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
&cc_setting_bin_attr);
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
if (ret) {
dd_dev_err(dd,
"Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
......@@ -716,8 +715,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
goto bail_cc;
}
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
&cc_table_bin_attr);
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr);
if (ret) {
dd_dev_err(dd,
"Skipping Congestion Control table sysfs info, (err %d) port %u\n",
......
......@@ -154,7 +154,8 @@ const char *parse_everbs_hdrs(
case OP(RC, COMPARE_SWAP):
case OP(RC, FETCH_ADD):
trace_seq_printf(p, ATOMICETH_PRN,
(unsigned long long)ib_u64_get(eh->atomic_eth.vaddr),
(unsigned long long)ib_u64_get(
eh->atomic_eth.vaddr),
eh->atomic_eth.rkey,
(unsigned long long)ib_u64_get(
(__be32 *)&eh->atomic_eth.swap_data),
......@@ -187,12 +188,12 @@ const char *parse_sdma_flags(
trace_seq_printf(p, "%s", flags);
if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
(u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT)
& SDMA_DESC1_HEADER_MODE_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT)
& SDMA_DESC1_HEADER_INDEX_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT)
& SDMA_DESC1_HEADER_DWS_MASK));
(u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
SDMA_DESC1_HEADER_MODE_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
SDMA_DESC1_HEADER_INDEX_MASK),
(u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
SDMA_DESC1_HEADER_DWS_MASK));
return ret;
}
......
......@@ -83,10 +83,10 @@ TRACE_EVENT(hfi1_rcvhdr,
u32 hlen,
u32 tlen,
u32 updegr,
u32 etail),
u32 etail
),
TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u64, eflags)
__field(u32, ctxt)
__field(u32, etype)
......@@ -95,8 +95,7 @@ TRACE_EVENT(hfi1_rcvhdr,
__field(u32, updegr)
__field(u32, etail)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->eflags = eflags;
__entry->ctxt = ctxt;
__entry->etype = etype;
......@@ -106,7 +105,7 @@ TRACE_EVENT(hfi1_rcvhdr,
__entry->etail = etail;
),
TP_printk(
"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
__get_str(dev),
__entry->ctxt,
__entry->eflags,
......@@ -121,14 +120,12 @@ TRACE_EVENT(hfi1_rcvhdr,
TRACE_EVENT(hfi1_receive_interrupt,
TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
TP_ARGS(dd, ctxt),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, ctxt)
__field(u8, slow_path)
__field(u8, dma_rtail)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = ctxt;
if (dd->rcd[ctxt]->do_interrupt ==
&handle_receive_interrupt) {
......@@ -144,8 +141,7 @@ TRACE_EVENT(hfi1_receive_interrupt,
__entry->slow_path = 0;
}
),
TP_printk(
"[%s] ctxt %d SlowPath: %d DmaRtail: %d",
TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
__get_str(dev),
__entry->ctxt,
__entry->slow_path,
......@@ -283,20 +279,17 @@ TRACE_EVENT(hfi1_mmu_invalidate,
TRACE_EVENT(hfi1_piofree,
TP_PROTO(struct send_context *sc, int extra),
TP_ARGS(sc, extra),
TP_STRUCT__entry(
DD_DEV_ENTRY(sc->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
__field(u32, sw_index)
__field(u32, hw_context)
__field(int, extra)
),
TP_fast_assign(
DD_DEV_ASSIGN(sc->dd);
TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
__entry->sw_index = sc->sw_index;
__entry->hw_context = sc->hw_context;
__entry->extra = extra;
),
TP_printk(
"[%s] ctxt %u(%u) extra %d",
TP_printk("[%s] ctxt %u(%u) extra %d",
__get_str(dev),
__entry->sw_index,
__entry->hw_context,
......@@ -307,22 +300,19 @@ TRACE_EVENT(hfi1_piofree,
TRACE_EVENT(hfi1_wantpiointr,
TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
TP_ARGS(sc, needint, credit_ctrl),
TP_STRUCT__entry(
DD_DEV_ENTRY(sc->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
__field(u32, sw_index)
__field(u32, hw_context)
__field(u32, needint)
__field(u64, credit_ctrl)
),
TP_fast_assign(
DD_DEV_ASSIGN(sc->dd);
TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
__entry->sw_index = sc->sw_index;
__entry->hw_context = sc->hw_context;
__entry->needint = needint;
__entry->credit_ctrl = credit_ctrl;
),
TP_printk(
"[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
__get_str(dev),
__entry->sw_index,
__entry->hw_context,
......@@ -367,16 +357,11 @@ DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
#define TRACE_SYSTEM hfi1_ibhdrs
u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
const char *parse_everbs_hdrs(
struct trace_seq *p,
u8 opcode,
void *ehdrs);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
const char *parse_sdma_flags(
struct trace_seq *p,
u64 desc0, u64 desc1);
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
......@@ -499,11 +484,11 @@ DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
__entry->pkey =
be32_to_cpu(ohdr->bth[0]) & 0xffff;
__entry->f =
(be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
& HFI1_FECN_MASK;
(be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
HFI1_FECN_MASK;
__entry->b =
(be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
& HFI1_BECN_MASK;
(be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
HFI1_BECN_MASK;
__entry->qpn =
be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
__entry->a =
......@@ -512,9 +497,7 @@ DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
__entry->psn =
be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
/* extended headers */
memcpy(
__get_dynamic_array(ehdrs),
&ohdr->u,
memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
ibhdr_exhdr_len(hdr));
),
TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
......@@ -602,7 +585,8 @@ TRACE_EVENT(snoop_capture,
memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
memcpy(__get_dynamic_array(raw_pkt), data, data_len);
),
TP_printk("[%s] " SNOOP_PRN,
TP_printk(
"[%s] " SNOOP_PRN,
__get_str(dev),
__entry->slid,
__entry->dlid,
......@@ -625,8 +609,7 @@ TRACE_EVENT(snoop_capture,
TRACE_EVENT(hfi1_uctxtdata,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
TP_ARGS(dd, uctxt),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned, ctxt)
__field(u32, credits)
__field(u64, hw_free)
......@@ -636,8 +619,7 @@ TRACE_EVENT(hfi1_uctxtdata,
__field(u32, eager_cnt)
__field(u64, rcvegr_phys)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
__entry->credits = uctxt->sc->credits;
__entry->hw_free = (u64)uctxt->sc->hw_free;
......@@ -645,10 +627,10 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
__entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
__entry->eager_cnt = uctxt->egrbufs.alloced;
__entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
__entry->rcvegr_phys =
uctxt->egrbufs.rcvtids[0].phys;
),
TP_printk(
"[%s] ctxt %u " UCTXT_FMT,
TP_printk("[%s] ctxt %u " UCTXT_FMT,
__get_str(dev),
__entry->ctxt,
__entry->credits,
......@@ -659,7 +641,7 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->eager_cnt,
__entry->rcvegr_phys
)
);
);
#define CINFO_FMT \
"egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
......@@ -667,8 +649,7 @@ TRACE_EVENT(hfi1_ctxt_info,
TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
struct hfi1_ctxt_info cinfo),
TP_ARGS(dd, ctxt, subctxt, cinfo),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned, ctxt)
__field(unsigned, subctxt)
__field(u16, egrtids)
......@@ -677,8 +658,7 @@ TRACE_EVENT(hfi1_ctxt_info,
__field(u16, sdma_ring_size)
__field(u32, rcvegr_size)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
__entry->egrtids = cinfo.egrtids;
......@@ -687,8 +667,7 @@ TRACE_EVENT(hfi1_ctxt_info,
__entry->sdma_ring_size = cinfo.sdma_ring_size;
__entry->rcvegr_size = cinfo.rcvegr_size;
),
TP_printk(
"[%s] ctxt %u:%u " CINFO_FMT,
TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
__get_str(dev),
__entry->ctxt,
__entry->subctxt,
......@@ -698,7 +677,7 @@ TRACE_EVENT(hfi1_ctxt_info,
__entry->rcvhdrq_size,
__entry->sdma_ring_size
)
);
);
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_sma
......@@ -712,17 +691,14 @@ TRACE_EVENT(hfi1_ctxt_info,
)
DECLARE_EVENT_CLASS(hfi1_bct_template,
TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
TP_PROTO(struct hfi1_devdata *dd,
struct buffer_control *bc),
TP_ARGS(dd, bc),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__dynamic_array(u8, bct, sizeof(*bc))
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
memcpy(
__get_dynamic_array(bct),
bc,
TP_fast_assign(DD_DEV_ASSIGN(dd);
memcpy(__get_dynamic_array(bct), bc,
sizeof(*bc));
),
TP_printk(BCT_FORMAT,
......@@ -769,23 +745,20 @@ DEFINE_EVENT(hfi1_bct_template, bct_get,
#define TRACE_SYSTEM hfi1_sdma
TRACE_EVENT(hfi1_sdma_descriptor,
TP_PROTO(
struct sdma_engine *sde,
TP_PROTO(struct sdma_engine *sde,
u64 desc0,
u64 desc1,
u16 e,
void *descp),
TP_ARGS(sde, desc0, desc1, e, descp),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(void *, descp)
__field(u64, desc0)
__field(u64, desc1)
__field(u16, e)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->desc0 = desc0;
__entry->desc1 = desc1;
__entry->idx = sde->this_idx;
......@@ -797,12 +770,12 @@ TRACE_EVENT(hfi1_sdma_descriptor,
__get_str(dev),
__entry->idx,
__parse_sdma_flags(__entry->desc0, __entry->desc1),
(__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
& SDMA_DESC0_PHY_ADDR_MASK,
(u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
& SDMA_DESC1_GENERATION_MASK),
(u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
& SDMA_DESC0_BYTE_COUNT_MASK),
(__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
SDMA_DESC0_PHY_ADDR_MASK,
(u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
SDMA_DESC1_GENERATION_MASK),
(u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
SDMA_DESC0_BYTE_COUNT_MASK),
__entry->desc0,
__entry->desc1,
__entry->descp,
......@@ -813,20 +786,17 @@ TRACE_EVENT(hfi1_sdma_descriptor,
TRACE_EVENT(hfi1_sdma_engine_select,
TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
TP_ARGS(dd, sel, vl, idx),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, sel)
__field(u8, vl)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->sel = sel;
__entry->vl = vl;
__entry->idx = idx;
),
TP_printk(
"[%s] selecting SDE %u sel 0x%x vl %u",
TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
__get_str(dev),
__entry->idx,
__entry->sel,
......@@ -835,23 +805,17 @@ TRACE_EVENT(hfi1_sdma_engine_select,
);
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
TP_PROTO(
struct sdma_engine *sde,
u64 status
),
TP_PROTO(struct sdma_engine *sde, u64 status),
TP_ARGS(sde, status),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(u64, status)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->status = status;
__entry->idx = sde->this_idx;
),
TP_printk(
"[%s] SDE(%u) status %llx",
TP_printk("[%s] SDE(%u) status %llx",
__get_str(dev),
__entry->idx,
(unsigned long long)__entry->status
......@@ -859,39 +823,27 @@ DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
);
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
TP_PROTO(
struct sdma_engine *sde,
u64 status
),
TP_PROTO(struct sdma_engine *sde, u64 status),
TP_ARGS(sde, status)
);
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
TP_PROTO(
struct sdma_engine *sde,
u64 status
),
TP_PROTO(struct sdma_engine *sde, u64 status),
TP_ARGS(sde, status)
);
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
TP_PROTO(
struct sdma_engine *sde,
int aidx
),
TP_PROTO(struct sdma_engine *sde, int aidx),
TP_ARGS(sde, aidx),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(int, aidx)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->idx = sde->this_idx;
__entry->aidx = aidx;
),
TP_printk(
"[%s] SDE(%u) aidx %d",
TP_printk("[%s] SDE(%u) aidx %d",
__get_str(dev),
__entry->idx,
__entry->aidx
......@@ -899,30 +851,22 @@ DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
);
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
TP_PROTO(
struct sdma_engine *sde,
int aidx
),
TP_PROTO(struct sdma_engine *sde, int aidx),
TP_ARGS(sde, aidx));
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
TP_PROTO(
struct sdma_engine *sde,
int aidx
),
TP_PROTO(struct sdma_engine *sde, int aidx),
TP_ARGS(sde, aidx));
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
TP_PROTO(
struct sdma_engine *sde,
TP_PROTO(struct sdma_engine *sde,
u16 hwhead,
u16 swhead,
struct sdma_txreq *txp
),
TP_ARGS(sde, hwhead, swhead, txp),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(u64, sn)
__field(u16, hwhead)
__field(u16, swhead)
......@@ -931,8 +875,7 @@ TRACE_EVENT(hfi1_sdma_progress,
__field(u16, tx_head)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->hwhead = hwhead;
__entry->swhead = swhead;
__entry->tx_tail = sde->tx_tail;
......@@ -955,15 +898,12 @@ TRACE_EVENT(hfi1_sdma_progress,
);
#else
TRACE_EVENT(hfi1_sdma_progress,
TP_PROTO(
struct sdma_engine *sde,
u16 hwhead,
u16 swhead,
TP_PROTO(struct sdma_engine *sde,
u16 hwhead, u16 swhead,
struct sdma_txreq *txp
),
TP_ARGS(sde, hwhead, swhead, txp),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(u16, hwhead)
__field(u16, swhead)
__field(u16, txnext)
......@@ -971,8 +911,7 @@ TRACE_EVENT(hfi1_sdma_progress,
__field(u16, tx_head)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->hwhead = hwhead;
__entry->swhead = swhead;
__entry->tx_tail = sde->tx_tail;
......@@ -994,23 +933,17 @@ TRACE_EVENT(hfi1_sdma_progress,
#endif
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
TP_PROTO(
struct sdma_engine *sde,
u64 sn
),
TP_PROTO(struct sdma_engine *sde, u64 sn),
TP_ARGS(sde, sn),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__field(u64, sn)
__field(u8, idx)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__entry->sn = sn;
__entry->idx = sde->this_idx;
),
TP_printk(
"[%s] SDE(%u) sn %llu",
TP_printk("[%s] SDE(%u) sn %llu",
__get_str(dev),
__entry->idx,
__entry->sn
......@@ -1026,10 +959,7 @@ DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
);
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
TP_PROTO(
struct sdma_engine *sde,
u64 sn
),
TP_PROTO(struct sdma_engine *sde, u64 sn),
TP_ARGS(sde, sn)
);
......@@ -1230,19 +1160,16 @@ TRACE_EVENT(hfi1_sdma_user_header_ahg,
);
TRACE_EVENT(hfi1_sdma_state,
TP_PROTO(
struct sdma_engine *sde,
TP_PROTO(struct sdma_engine *sde,
const char *cstate,
const char *nstate
),
TP_ARGS(sde, cstate, nstate),
TP_STRUCT__entry(
DD_DEV_ENTRY(sde->dd)
TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
__string(curstate, cstate)
__string(newstate, nstate)
),
TP_fast_assign(
DD_DEV_ASSIGN(sde->dd);
TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
__assign_str(curstate, cstate);
__assign_str(newstate, nstate);
),
......@@ -1322,14 +1249,13 @@ TRACE_EVENT(hfi1_interrupt,
TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
int src),
TP_ARGS(dd, is_entry, src),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__array(char, buf, 64)
__field(int, src)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd)
is_entry->is_name(__entry->buf, 64, src - is_entry->start);
TP_fast_assign(DD_DEV_ASSIGN(dd)
is_entry->is_name(__entry->buf, 64,
src - is_entry->start);
__entry->src = src;
),
TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
......@@ -1350,15 +1276,15 @@ TRACE_EVENT(hfi1_interrupt,
DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(
__string(function, function)
TP_STRUCT__entry(__string(function, function)
__dynamic_array(char, msg, MAX_MSG_LEN)
),
TP_fast_assign(
__assign_str(function, function);
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
TP_fast_assign(__assign_str(function, function);
WARN_ON_ONCE(vsnprintf
(__get_dynamic_array(msg),
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >= MAX_MSG_LEN);
*vaf->va) >=
MAX_MSG_LEN);
),
TP_printk("(%s) %s",
__get_str(function),
......
......@@ -320,7 +320,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(!loopback && (lid == ppd->lid ||
if (unlikely(!loopback &&
(lid == ppd->lid ||
(lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
qp->ibqp.qp_type == IB_QPT_GSI)))) {
unsigned long flags;
......
......@@ -970,7 +970,8 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
/* The most likely matching pkey has index qp->s_pkey_index */
if (unlikely(!egress_pkey_matches_entry(pkey,
ppd->pkeys[qp->s_pkey_index]))) {
ppd->pkeys
[qp->s_pkey_index]))) {
/* no match - try the entire table */
for (; i < MAX_PKEY_VALUES; i++) {
if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment