Commit 50e5dcbe authored by Jubin John, committed by Doug Ledford

staging/rdma/hfi1: Remove space after cast

Remove the space after a cast to fix checkpatch check:
CHECK: No space is necessary after a cast
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 74182acd
...@@ -5436,7 +5436,7 @@ static void update_rcverr_timer(unsigned long opaque) ...@@ -5436,7 +5436,7 @@ static void update_rcverr_timer(unsigned long opaque)
OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
queue_work(ppd->hfi1_wq, &ppd->link_bounce_work); queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
} }
dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt; dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
} }
...@@ -6366,7 +6366,7 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort) ...@@ -6366,7 +6366,7 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
reg reg
| (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
| (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT)); | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
(void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) { if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */ udelay(1); /* must hold for the longer of 16cclks or 20ns */
write_csr(dd, DCC_CFG_RESET, reg); write_csr(dd, DCC_CFG_RESET, reg);
...@@ -13407,7 +13407,7 @@ static void init_chip(struct hfi1_devdata *dd) ...@@ -13407,7 +13407,7 @@ static void init_chip(struct hfi1_devdata *dd)
* across the clear. * across the clear.
*/ */
write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
(void) read_csr(dd, CCE_DC_CTRL); (void)read_csr(dd, CCE_DC_CTRL);
if (use_flr) { if (use_flr) {
/* /*
......
...@@ -149,8 +149,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v) ...@@ -149,8 +149,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
if (!n_packets && !n_bytes) if (!n_packets && !n_bytes)
return SEQ_SKIP; return SEQ_SKIP;
seq_printf(s, "%02llx %llu/%llu\n", i, seq_printf(s, "%02llx %llu/%llu\n", i,
(unsigned long long) n_packets, (unsigned long long)n_packets,
(unsigned long long) n_bytes); (unsigned long long)n_bytes);
return 0; return 0;
} }
......
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
#include "verbs.h" #include "verbs.h"
#define BAD_DMA_ADDRESS ((u64) 0) #define BAD_DMA_ADDRESS ((u64)0)
/* /*
* The following functions implement driver specific replacements * The following functions implement driver specific replacements
...@@ -74,7 +74,7 @@ static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr, ...@@ -74,7 +74,7 @@ static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr,
if (WARN_ON(!valid_dma_direction(direction))) if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS; return BAD_DMA_ADDRESS;
return (u64) cpu_addr; return (u64)cpu_addr;
} }
static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
...@@ -95,7 +95,7 @@ static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page, ...@@ -95,7 +95,7 @@ static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page,
if (offset + size > PAGE_SIZE) if (offset + size > PAGE_SIZE)
return BAD_DMA_ADDRESS; return BAD_DMA_ADDRESS;
addr = (u64) page_address(page); addr = (u64)page_address(page);
if (addr) if (addr)
addr += offset; addr += offset;
...@@ -120,7 +120,7 @@ static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl, ...@@ -120,7 +120,7 @@ static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl,
return BAD_DMA_ADDRESS; return BAD_DMA_ADDRESS;
for_each_sg(sgl, sg, nents, i) { for_each_sg(sgl, sg, nents, i) {
addr = (u64) page_address(sg_page(sg)); addr = (u64)page_address(sg_page(sg));
if (!addr) { if (!addr) {
ret = 0; ret = 0;
break; break;
...@@ -161,14 +161,14 @@ static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size, ...@@ -161,14 +161,14 @@ static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size,
if (p) if (p)
addr = page_address(p); addr = page_address(p);
if (dma_handle) if (dma_handle)
*dma_handle = (u64) addr; *dma_handle = (u64)addr;
return addr; return addr;
} }
static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size, static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle) void *cpu_addr, u64 dma_handle)
{ {
free_pages((unsigned long) cpu_addr, get_order(size)); free_pages((unsigned long)cpu_addr, get_order(size));
} }
struct ib_dma_mapping_ops hfi1_dma_mapping_ops = { struct ib_dma_mapping_ops hfi1_dma_mapping_ops = {
......
...@@ -594,7 +594,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) ...@@ -594,7 +594,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
while (1) { while (1) {
struct hfi1_devdata *dd = rcd->dd; struct hfi1_devdata *dd = rcd->dd;
struct hfi1_ibport *ibp = &rcd->ppd->ibport_data; struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head + __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
dd->rhf_offset; dd->rhf_offset;
struct rvt_qp *qp; struct rvt_qp *qp;
struct hfi1_ib_header *hdr; struct hfi1_ib_header *hdr;
...@@ -730,7 +730,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) ...@@ -730,7 +730,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
} }
} }
packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff + packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
packet->rcd->dd->rhf_offset; packet->rcd->dd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr); packet->rhf = rhf_to_cpu(packet->rhf_addr);
...@@ -969,7 +969,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) ...@@ -969,7 +969,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
/* On to the next packet */ /* On to the next packet */
packet.rhqoff += packet.rsize; packet.rhqoff += packet.rsize;
packet.rhf_addr = (__le32 *) rcd->rcvhdrq + packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
packet.rhqoff + packet.rhqoff +
dd->rhf_offset; dd->rhf_offset;
packet.rhf = rhf_to_cpu(packet.rhf_addr); packet.rhf = rhf_to_cpu(packet.rhf_addr);
......
...@@ -1838,7 +1838,7 @@ void read_guid(struct hfi1_devdata *dd) ...@@ -1838,7 +1838,7 @@ void read_guid(struct hfi1_devdata *dd)
{ {
/* Take the DC out of reset to get a valid GUID value */ /* Take the DC out of reset to get a valid GUID value */
write_csr(dd, CCE_DC_CTRL, 0); write_csr(dd, CCE_DC_CTRL, 0);
(void) read_csr(dd, CCE_DC_CTRL); (void)read_csr(dd, CCE_DC_CTRL);
dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID); dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
dd_dev_info(dd, "GUID %llx", dd_dev_info(dd, "GUID %llx",
......
...@@ -1668,7 +1668,7 @@ void hfi1_release_user_pages(struct page **, size_t, bool); ...@@ -1668,7 +1668,7 @@ void hfi1_release_user_pages(struct page **, size_t, bool);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{ {
*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL; *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
} }
static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
...@@ -1677,7 +1677,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) ...@@ -1677,7 +1677,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
* volatile because it's a DMA target from the chip, routine is * volatile because it's a DMA target from the chip, routine is
* inlined, and don't want register caching or reordering. * inlined, and don't want register caching or reordering.
*/ */
return (u32) le64_to_cpu(*rcd->rcvhdrtail_kvaddr); return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
} }
/* /*
......
...@@ -744,7 +744,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, ...@@ -744,7 +744,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp); return reply((struct ib_mad_hdr *)smp);
} }
n_blocks_avail = (u16) (npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
...@@ -758,7 +758,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, ...@@ -758,7 +758,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp); return reply((struct ib_mad_hdr *)smp);
} }
p = (__be16 *) data; p = (__be16 *)data;
q = (u16 *)data; q = (u16 *)data;
/* get the real pkeys if we are requesting the first block */ /* get the real pkeys if we are requesting the first block */
if (start_block == 0) { if (start_block == 0) {
...@@ -1406,7 +1406,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1406,7 +1406,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 n_blocks_sent = OPA_AM_NBLK(am); u32 n_blocks_sent = OPA_AM_NBLK(am);
u32 start_block = am & 0x7ff; u32 start_block = am & 0x7ff;
u16 *p = (u16 *) data; u16 *p = (u16 *)data;
__be16 *q = (__be16 *)data; __be16 *q = (__be16 *)data;
int i; int i;
u16 n_blocks_avail; u16 n_blocks_avail;
...@@ -1586,7 +1586,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1586,7 +1586,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
{ {
u32 n_blocks = OPA_AM_NBLK(am); u32 n_blocks = OPA_AM_NBLK(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
void *vp = (void *) data; void *vp = (void *)data;
size_t size = 4 * sizeof(u64); size_t size = 4 * sizeof(u64);
if (n_blocks != 1) { if (n_blocks != 1) {
...@@ -1609,7 +1609,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1609,7 +1609,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NBLK(am); u32 n_blocks = OPA_AM_NBLK(am);
int async_update = OPA_AM_ASYNC(am); int async_update = OPA_AM_ASYNC(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
void *vp = (void *) data; void *vp = (void *)data;
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
int lstate; int lstate;
...@@ -1641,7 +1641,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1641,7 +1641,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NPORT(am); u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
void *vp = (void *) data; void *vp = (void *)data;
int size; int size;
if (n_blocks != 1) { if (n_blocks != 1) {
...@@ -1666,7 +1666,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1666,7 +1666,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NPORT(am); u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
void *vp = (void *) data; void *vp = (void *)data;
int lstate; int lstate;
if (n_blocks != 1) { if (n_blocks != 1) {
...@@ -1699,7 +1699,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1699,7 +1699,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
u32 lstate; u32 lstate;
struct hfi1_ibport *ibp; struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct opa_port_state_info *psi = (struct opa_port_state_info *) data; struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
if (nports != 1) { if (nports != 1) {
smp->status |= IB_SMP_INVALID_FIELD; smp->status |= IB_SMP_INVALID_FIELD;
...@@ -1748,7 +1748,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1748,7 +1748,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
u8 ls_new, ps_new; u8 ls_new, ps_new;
struct hfi1_ibport *ibp; struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct opa_port_state_info *psi = (struct opa_port_state_info *) data; struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
int ret, invalid = 0; int ret, invalid = 0;
if (nports != 1) { if (nports != 1) {
...@@ -1834,7 +1834,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1834,7 +1834,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
u32 num_ports = OPA_AM_NPORT(am); u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct buffer_control *p = (struct buffer_control *) data; struct buffer_control *p = (struct buffer_control *)data;
int size; int size;
if (num_ports != 1) { if (num_ports != 1) {
...@@ -1857,7 +1857,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1857,7 +1857,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
u32 num_ports = OPA_AM_NPORT(am); u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct buffer_control *p = (struct buffer_control *) data; struct buffer_control *p = (struct buffer_control *)data;
if (num_ports != 1) { if (num_ports != 1) {
smp->status |= IB_SMP_INVALID_FIELD; smp->status |= IB_SMP_INVALID_FIELD;
...@@ -1930,10 +1930,10 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, ...@@ -1930,10 +1930,10 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
switch (section) { switch (section) {
case OPA_VLARB_LOW_ELEMENTS: case OPA_VLARB_LOW_ELEMENTS:
(void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p); (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
break; break;
case OPA_VLARB_HIGH_ELEMENTS: case OPA_VLARB_HIGH_ELEMENTS:
(void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p); (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
break; break;
/* neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX /* neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
* can be changed from the default values */ * can be changed from the default values */
...@@ -2522,7 +2522,7 @@ static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, ...@@ -2522,7 +2522,7 @@ static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
idx_from_vl(vl)); idx_from_vl(vl));
if (tmp < sum_vl_xmit_wait) { if (tmp < sum_vl_xmit_wait) {
/* we wrapped */ /* we wrapped */
sum_vl_xmit_wait = (u64) ~0; sum_vl_xmit_wait = (u64)~0;
break; break;
} }
sum_vl_xmit_wait = tmp; sum_vl_xmit_wait = tmp;
...@@ -3287,7 +3287,7 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, ...@@ -3287,7 +3287,7 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
{ {
int i; int i;
struct opa_congestion_setting_attr *p = struct opa_congestion_setting_attr *p =
(struct opa_congestion_setting_attr *) data; (struct opa_congestion_setting_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_congestion_setting_entry_shadow *entries; struct opa_congestion_setting_entry_shadow *entries;
...@@ -3326,7 +3326,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, ...@@ -3326,7 +3326,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len) u32 *resp_len)
{ {
struct opa_congestion_setting_attr *p = struct opa_congestion_setting_attr *p =
(struct opa_congestion_setting_attr *) data; (struct opa_congestion_setting_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_congestion_setting_entry_shadow *entries; struct opa_congestion_setting_entry_shadow *entries;
...@@ -3418,7 +3418,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, ...@@ -3418,7 +3418,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len) u32 *resp_len)
{ {
struct ib_cc_table_attr *cc_table_attr = struct ib_cc_table_attr *cc_table_attr =
(struct ib_cc_table_attr *) data; (struct ib_cc_table_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 start_block = OPA_AM_START_BLK(am); u32 start_block = OPA_AM_START_BLK(am);
...@@ -3475,7 +3475,7 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, ...@@ -3475,7 +3475,7 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port, struct ib_device *ibdev, u8 port,
u32 *resp_len) u32 *resp_len)
{ {
struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data; struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 start_block = OPA_AM_START_BLK(am); u32 start_block = OPA_AM_START_BLK(am);
...@@ -3559,7 +3559,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, ...@@ -3559,7 +3559,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len) u32 *resp_len)
{ {
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct opa_led_info *p = (struct opa_led_info *) data; struct opa_led_info *p = (struct opa_led_info *)data;
u32 nport = OPA_AM_NPORT(am); u32 nport = OPA_AM_NPORT(am);
u64 reg; u64 reg;
...@@ -3584,7 +3584,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, ...@@ -3584,7 +3584,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len) u32 *resp_len)
{ {
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct opa_led_info *p = (struct opa_led_info *) data; struct opa_led_info *p = (struct opa_led_info *)data;
u32 nport = OPA_AM_NPORT(am); u32 nport = OPA_AM_NPORT(am);
int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK); int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
...@@ -3800,7 +3800,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp, ...@@ -3800,7 +3800,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
/* zero the payload for this segment */ /* zero the payload for this segment */
memset(next_smp + sizeof(*agg), 0, agg_data_len); memset(next_smp + sizeof(*agg), 0, agg_data_len);
(void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data, (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
ibdev, port, NULL); ibdev, port, NULL);
if (smp->status & ~IB_SMP_DIRECTION) { if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg); set_aggr_error(agg);
...@@ -3844,7 +3844,7 @@ static int subn_set_opa_aggregate(struct opa_smp *smp, ...@@ -3844,7 +3844,7 @@ static int subn_set_opa_aggregate(struct opa_smp *smp,
return reply((struct ib_mad_hdr *)smp); return reply((struct ib_mad_hdr *)smp);
} }
(void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data, (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
ibdev, port, NULL); ibdev, port, NULL);
if (smp->status & ~IB_SMP_DIRECTION) { if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg); set_aggr_error(agg);
...@@ -3989,7 +3989,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, ...@@ -3989,7 +3989,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
smp->method == IB_MGMT_METHOD_SET) && smp->method == IB_MGMT_METHOD_SET) &&
port_num && port_num <= ibdev->phys_port_cnt && port_num && port_num <= ibdev->phys_port_cnt &&
port != port_num) port != port_num)
(void) check_mkey(to_iport(ibdev, port_num), (void)check_mkey(to_iport(ibdev, port_num),
(struct ib_mad_hdr *)smp, 0, (struct ib_mad_hdr *)smp, 0,
smp->mkey, smp->route.dr.dr_slid, smp->mkey, smp->route.dr.dr_slid,
smp->route.dr.return_path, smp->route.dr.return_path,
...@@ -4079,7 +4079,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ...@@ -4079,7 +4079,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
smp->method == IB_MGMT_METHOD_SET) && smp->method == IB_MGMT_METHOD_SET) &&
port_num && port_num <= ibdev->phys_port_cnt && port_num && port_num <= ibdev->phys_port_cnt &&
port != port_num) port != port_num)
(void) check_mkey(to_iport(ibdev, port_num), (void)check_mkey(to_iport(ibdev, port_num),
(struct ib_mad_hdr *)smp, 0, (struct ib_mad_hdr *)smp, 0,
smp->mkey, smp->mkey,
(__force __be32)smp->dr_slid, (__force __be32)smp->dr_slid,
......
...@@ -233,7 +233,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, ...@@ -233,7 +233,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
*/ */
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
{ {
u64 __iomem *base = (void __iomem *) dd->kregbase; u64 __iomem *base = (void __iomem *)dd->kregbase;
dd->flags &= ~HFI1_PRESENT; dd->flags &= ~HFI1_PRESENT;
dd->kregbase = NULL; dd->kregbase = NULL;
...@@ -1188,7 +1188,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) ...@@ -1188,7 +1188,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
/* step 5h: arm gasket logic */ /* step 5h: arm gasket logic */
/* hold DC in reset across the SBR */ /* hold DC in reset across the SBR */
write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
(void) read_csr(dd, CCE_DC_CTRL); /* DC reset hold */ (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
/* save firmware control across the SBR */ /* save firmware control across the SBR */
fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL); fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
......
...@@ -130,7 +130,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) ...@@ -130,7 +130,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
if (write) { if (write) {
write_csr(dd, SEND_CTRL, reg); write_csr(dd, SEND_CTRL, reg);
if (flush) if (flush)
(void) read_csr(dd, SEND_CTRL); /* flush write */ (void)read_csr(dd, SEND_CTRL); /* flush write */
} }
spin_unlock_irqrestore(&dd->sendctrl_lock, flags); spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
......
...@@ -1610,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, ...@@ -1610,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
__be32 *p = ohdr->u.at.atomic_ack_eth; __be32 *p = ohdr->u.at.atomic_ack_eth;
val = ((u64) be32_to_cpu(p[0]) << 32) | val = ((u64)be32_to_cpu(p[0]) << 32) |
be32_to_cpu(p[1]); be32_to_cpu(p[1]);
} else } else
val = 0; val = 0;
...@@ -1708,7 +1708,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, ...@@ -1708,7 +1708,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
aeth = be32_to_cpu(ohdr->u.aeth); aeth = be32_to_cpu(ohdr->u.aeth);
hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0); hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
WARN_ON(qp->s_rdma_read_sge.num_sge); WARN_ON(qp->s_rdma_read_sge.num_sge);
(void) do_rc_ack(qp, aeth, psn, (void)do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd); OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
goto ack_done; goto ack_done;
} }
...@@ -1906,7 +1906,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, ...@@ -1906,7 +1906,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* or the send tasklet is already backed up to send an * or the send tasklet is already backed up to send an
* earlier entry, we can ignore this request. * earlier entry, we can ignore this request.
*/ */
if (!e || e->opcode != (u8) opcode || old_req) if (!e || e->opcode != (u8)opcode || old_req)
goto unlock_done; goto unlock_done;
qp->s_tail_ack_queue = prev; qp->s_tail_ack_queue = prev;
break; break;
...@@ -2430,7 +2430,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2430,7 +2430,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
ateth = &ohdr->u.atomic_eth; ateth = &ohdr->u.atomic_eth;
vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) |
be32_to_cpu(ateth->vaddr[1]); be32_to_cpu(ateth->vaddr[1]);
if (unlikely(vaddr & (sizeof(u64) - 1))) if (unlikely(vaddr & (sizeof(u64) - 1)))
goto nack_inv_unlck; goto nack_inv_unlck;
...@@ -2441,11 +2441,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2441,11 +2441,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
IB_ACCESS_REMOTE_ATOMIC))) IB_ACCESS_REMOTE_ATOMIC)))
goto nack_acc_unlck; goto nack_acc_unlck;
/* Perform atomic OP and save result. */ /* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr; maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
sdata = be64_to_cpu(ateth->swap_data); sdata = be64_to_cpu(ateth->swap_data);
e->atomic_data = (opcode == OP(FETCH_ADD)) ? e->atomic_data = (opcode == OP(FETCH_ADD)) ?
(u64) atomic64_add_return(sdata, maddr) - sdata : (u64)atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data), be64_to_cpu(ateth->compare_data),
sdata); sdata);
rvt_put_mr(qp->r_sge.sge.mr); rvt_put_mr(qp->r_sge.sge.mr);
......
...@@ -508,12 +508,12 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -508,12 +508,12 @@ static void ruc_loopback(struct rvt_qp *sqp)
IB_ACCESS_REMOTE_ATOMIC))) IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err; goto acc_err;
/* Perform atomic OP and save result. */ /* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr; maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
sdata = wqe->atomic_wr.compare_add; sdata = wqe->atomic_wr.compare_add;
*(u64 *) sqp->s_sge.sge.vaddr = *(u64 *)sqp->s_sge.sge.vaddr =
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
(u64) atomic64_add_return(sdata, maddr) - sdata : (u64)atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap); sdata, wqe->atomic_wr.swap);
rvt_put_mr(qp->r_sge.sge.mr); rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
......
...@@ -534,7 +534,7 @@ static void sdma_err_progress_check(unsigned long data) ...@@ -534,7 +534,7 @@ static void sdma_err_progress_check(unsigned long data)
static void sdma_hw_clean_up_task(unsigned long opaque) static void sdma_hw_clean_up_task(unsigned long opaque)
{ {
struct sdma_engine *sde = (struct sdma_engine *) opaque; struct sdma_engine *sde = (struct sdma_engine *)opaque;
u64 statuscsr; u64 statuscsr;
while (1) { while (1) {
...@@ -594,7 +594,7 @@ static void sdma_flush_descq(struct sdma_engine *sde) ...@@ -594,7 +594,7 @@ static void sdma_flush_descq(struct sdma_engine *sde)
static void sdma_sw_clean_up_task(unsigned long opaque) static void sdma_sw_clean_up_task(unsigned long opaque)
{ {
struct sdma_engine *sde = (struct sdma_engine *) opaque; struct sdma_engine *sde = (struct sdma_engine *)opaque;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&sde->tail_lock, flags); spin_lock_irqsave(&sde->tail_lock, flags);
...@@ -1345,8 +1345,8 @@ static inline u16 sdma_gethead(struct sdma_engine *sde) ...@@ -1345,8 +1345,8 @@ static inline u16 sdma_gethead(struct sdma_engine *sde)
use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
(dd->flags & HFI1_HAS_SDMA_TIMEOUT); (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
hwhead = use_dmahead ? hwhead = use_dmahead ?
(u16) le64_to_cpu(*sde->head_dma) : (u16)le64_to_cpu(*sde->head_dma) :
(u16) read_sde_csr(sde, SD(HEAD)); (u16)read_sde_csr(sde, SD(HEAD));
if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
u16 cnt; u16 cnt;
...@@ -3021,7 +3021,7 @@ void sdma_freeze(struct hfi1_devdata *dd) ...@@ -3021,7 +3021,7 @@ void sdma_freeze(struct hfi1_devdata *dd)
* software clean will read engine CSRs, so must be completed before * software clean will read engine CSRs, so must be completed before
* the next step, which will clear the engine CSRs. * the next step, which will clear the engine CSRs.
*/ */
(void) wait_event_interruptible(dd->sdma_unfreeze_wq, (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
atomic_read(&dd->sdma_unfreeze_count) <= 0); atomic_read(&dd->sdma_unfreeze_count) <= 0);
/* no need to check results - done no matter what */ /* no need to check results - done no matter what */
} }
......
...@@ -158,7 +158,7 @@ const char *parse_everbs_hdrs( ...@@ -158,7 +158,7 @@ const char *parse_everbs_hdrs(
eh->atomic_eth.rkey, eh->atomic_eth.rkey,
(unsigned long long)ib_u64_get( (unsigned long long)ib_u64_get(
(__be32 *)&eh->atomic_eth.swap_data), (__be32 *)&eh->atomic_eth.swap_data),
(unsigned long long) ib_u64_get( (unsigned long long)ib_u64_get(
(__be32 *)&eh->atomic_eth.compare_data)); (__be32 *)&eh->atomic_eth.compare_data));
break; break;
/* deth */ /* deth */
......
...@@ -1030,7 +1030,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) ...@@ -1030,7 +1030,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/ */
static inline int num_user_pages(const struct iovec *iov) static inline int num_user_pages(const struct iovec *iov)
{ {
const unsigned long addr = (unsigned long) iov->iov_base; const unsigned long addr = (unsigned long)iov->iov_base;
const unsigned long len = iov->iov_len; const unsigned long len = iov->iov_len;
const unsigned long spage = addr & PAGE_MASK; const unsigned long spage = addr & PAGE_MASK;
const unsigned long epage = (addr + len - 1) & PAGE_MASK; const unsigned long epage = (addr + len - 1) & PAGE_MASK;
......
...@@ -346,7 +346,7 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, ...@@ -346,7 +346,7 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*/ */
static inline int cmp_msn(u32 a, u32 b) static inline int cmp_msn(u32 a, u32 b)
{ {
return (((int) a) - ((int) b)) << 8; return (((int)a) - ((int)b)) << 8;
} }
/* /*
...@@ -355,7 +355,7 @@ static inline int cmp_msn(u32 a, u32 b) ...@@ -355,7 +355,7 @@ static inline int cmp_msn(u32 a, u32 b)
*/ */
static inline int cmp_psn(u32 a, u32 b) static inline int cmp_psn(u32 a, u32 b)
{ {
return (((int) a) - ((int) b)) << PSN_SHIFT; return (((int)a) - ((int)b)) << PSN_SHIFT;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment