Commit 6870957e authored by Felix Manlunas's avatar Felix Manlunas Committed by David S. Miller

liquidio: make soft command calls synchronous

1. Add wait_for_sc_completion_timeout() for waiting the response and
   handling common response errors
2. Send sc's synchronously: remove unused callback function,
   and context structure; use wait_for_sc_completion_timeout() to wait
   its response.
Signed-off-by: Weilin Chang <weilin.chang@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c9aec052
...@@ -1333,8 +1333,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, ...@@ -1333,8 +1333,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
struct oct_nic_stats_resp *resp = struct oct_nic_stats_resp *resp =
(struct oct_nic_stats_resp *)sc->virtrptr; (struct oct_nic_stats_resp *)sc->virtrptr;
struct oct_nic_stats_ctrl *ctrl =
(struct oct_nic_stats_ctrl *)sc->ctxptr;
struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
...@@ -1424,7 +1422,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, ...@@ -1424,7 +1422,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
} else { } else {
resp->status = -1; resp->status = -1;
} }
complete(&ctrl->complete);
} }
int octnet_get_link_stats(struct net_device *netdev) int octnet_get_link_stats(struct net_device *netdev)
...@@ -1432,7 +1429,6 @@ int octnet_get_link_stats(struct net_device *netdev) ...@@ -1432,7 +1429,6 @@ int octnet_get_link_stats(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev); struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev; struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_soft_command *sc; struct octeon_soft_command *sc;
struct oct_nic_stats_ctrl *ctrl;
struct oct_nic_stats_resp *resp; struct oct_nic_stats_resp *resp;
int retval; int retval;
...@@ -1441,7 +1437,7 @@ int octnet_get_link_stats(struct net_device *netdev) ...@@ -1441,7 +1437,7 @@ int octnet_get_link_stats(struct net_device *netdev)
octeon_alloc_soft_command(oct_dev, octeon_alloc_soft_command(oct_dev,
0, 0,
sizeof(struct oct_nic_stats_resp), sizeof(struct oct_nic_stats_resp),
sizeof(struct octnic_ctrl_pkt)); 0);
if (!sc) if (!sc)
return -ENOMEM; return -ENOMEM;
...@@ -1449,66 +1445,39 @@ int octnet_get_link_stats(struct net_device *netdev) ...@@ -1449,66 +1445,39 @@ int octnet_get_link_stats(struct net_device *netdev)
resp = (struct oct_nic_stats_resp *)sc->virtrptr; resp = (struct oct_nic_stats_resp *)sc->virtrptr;
memset(resp, 0, sizeof(struct oct_nic_stats_resp)); memset(resp, 0, sizeof(struct oct_nic_stats_resp));
ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; init_completion(&sc->complete);
memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); sc->sc_status = OCTEON_REQUEST_PENDING;
ctrl->netdev = netdev;
init_completion(&ctrl->complete);
sc->iq_no = lio->linfo.txpciq[0].s.q_no; sc->iq_no = lio->linfo.txpciq[0].s.q_no;
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_PORT_STATS, 0, 0, 0); OPCODE_NIC_PORT_STATS, 0, 0, 0);
sc->callback = octnet_nic_stats_callback;
sc->callback_arg = sc;
sc->wait_time = 500; /*in milli seconds*/
retval = octeon_send_soft_command(oct_dev, sc); retval = octeon_send_soft_command(oct_dev, sc);
if (retval == IQ_SEND_FAILED) { if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct_dev, sc); octeon_free_soft_command(oct_dev, sc);
return -EINVAL; return -EINVAL;
} }
wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); retval = wait_for_sc_completion_timeout(oct_dev, sc,
(2 * LIO_SC_MAX_TMO_MS));
if (resp->status != 1) { if (retval) {
octeon_free_soft_command(oct_dev, sc); dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
return retval;
return -EINVAL;
} }
octeon_free_soft_command(oct_dev, sc); octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
WRITE_ONCE(sc->caller_is_done, true);
return 0; return 0;
} }
/* Completion callback for SEAPI (u-boot control) soft commands.
 * Invoked with the firmware status for the request; it records the
 * status in the per-request context and wakes the submitter blocked
 * on ctx->complete.
 */
static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
					    u32 status,
					    void *buf)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_soft_command *sc = buf;

	ctx = sc->ctxptr;
	/* re-derive the device from the id the submitter stashed in ctx */
	oct = lio_get_device(ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
			__func__,
			CVM_CAST64(status));
	}
	/* hand the status back and unblock the waiter */
	ctx->status = status;
	complete(&ctx->complete);
}
int liquidio_set_speed(struct lio *lio, int speed) int liquidio_set_speed(struct lio *lio, int speed)
{ {
struct liquidio_nic_seapi_ctl_context *ctx;
struct octeon_device *oct = lio->oct_dev; struct octeon_device *oct = lio->oct_dev;
struct oct_nic_seapi_resp *resp; struct oct_nic_seapi_resp *resp;
struct octeon_soft_command *sc; struct octeon_soft_command *sc;
union octnet_cmd *ncmd; union octnet_cmd *ncmd;
u32 ctx_size;
int retval; int retval;
u32 var; u32 var;
...@@ -1521,21 +1490,18 @@ int liquidio_set_speed(struct lio *lio, int speed) ...@@ -1521,21 +1490,18 @@ int liquidio_set_speed(struct lio *lio, int speed)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp), sizeof(struct oct_nic_seapi_resp),
ctx_size); 0);
if (!sc) if (!sc)
return -ENOMEM; return -ENOMEM;
ncmd = sc->virtdptr; ncmd = sc->virtdptr;
ctx = sc->ctxptr;
resp = sc->virtrptr; resp = sc->virtrptr;
memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
ctx->octeon_id = lio_get_device_id(oct); init_completion(&sc->complete);
ctx->status = 0; sc->sc_status = OCTEON_REQUEST_PENDING;
init_completion(&ctx->complete);
ncmd->u64 = 0; ncmd->u64 = 0;
ncmd->s.cmd = SEAPI_CMD_SPEED_SET; ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
...@@ -1548,30 +1514,24 @@ int liquidio_set_speed(struct lio *lio, int speed) ...@@ -1548,30 +1514,24 @@ int liquidio_set_speed(struct lio *lio, int speed)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_UBOOT_CTL, 0, 0, 0); OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
sc->callback = liquidio_nic_seapi_ctl_callback;
sc->callback_arg = sc;
sc->wait_time = 5000;
retval = octeon_send_soft_command(oct, sc); retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) { if (retval == IQ_SEND_FAILED) {
dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
octeon_free_soft_command(oct, sc);
retval = -EBUSY; retval = -EBUSY;
} else { } else {
/* Wait for response or timeout */ /* Wait for response or timeout */
if (wait_for_completion_timeout(&ctx->complete, retval = wait_for_sc_completion_timeout(oct, sc, 0);
msecs_to_jiffies(10000)) == 0) { if (retval)
dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", return retval;
__func__);
octeon_free_soft_command(oct, sc);
return -EINTR;
}
retval = resp->status; retval = resp->status;
if (retval) { if (retval) {
dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
__func__, retval); __func__, retval);
octeon_free_soft_command(oct, sc); WRITE_ONCE(sc->caller_is_done, true);
return -EIO; return -EIO;
} }
...@@ -1583,38 +1543,32 @@ int liquidio_set_speed(struct lio *lio, int speed) ...@@ -1583,38 +1543,32 @@ int liquidio_set_speed(struct lio *lio, int speed)
} }
oct->speed_setting = var; oct->speed_setting = var;
WRITE_ONCE(sc->caller_is_done, true);
} }
octeon_free_soft_command(oct, sc);
return retval; return retval;
} }
int liquidio_get_speed(struct lio *lio) int liquidio_get_speed(struct lio *lio)
{ {
struct liquidio_nic_seapi_ctl_context *ctx;
struct octeon_device *oct = lio->oct_dev; struct octeon_device *oct = lio->oct_dev;
struct oct_nic_seapi_resp *resp; struct oct_nic_seapi_resp *resp;
struct octeon_soft_command *sc; struct octeon_soft_command *sc;
union octnet_cmd *ncmd; union octnet_cmd *ncmd;
u32 ctx_size;
int retval; int retval;
ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp), sizeof(struct oct_nic_seapi_resp),
ctx_size); 0);
if (!sc) if (!sc)
return -ENOMEM; return -ENOMEM;
ncmd = sc->virtdptr; ncmd = sc->virtdptr;
ctx = sc->ctxptr;
resp = sc->virtrptr; resp = sc->virtrptr;
memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
ctx->octeon_id = lio_get_device_id(oct); init_completion(&sc->complete);
ctx->status = 0; sc->sc_status = OCTEON_REQUEST_PENDING;
init_completion(&ctx->complete);
ncmd->u64 = 0; ncmd->u64 = 0;
ncmd->s.cmd = SEAPI_CMD_SPEED_GET; ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
...@@ -1626,37 +1580,20 @@ int liquidio_get_speed(struct lio *lio) ...@@ -1626,37 +1580,20 @@ int liquidio_get_speed(struct lio *lio)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_UBOOT_CTL, 0, 0, 0); OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
sc->callback = liquidio_nic_seapi_ctl_callback;
sc->callback_arg = sc;
sc->wait_time = 5000;
retval = octeon_send_soft_command(oct, sc); retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) { if (retval == IQ_SEND_FAILED) {
dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
oct->no_speed_setting = 1; octeon_free_soft_command(oct, sc);
oct->speed_setting = 25; retval = -EIO;
retval = -EBUSY;
} else { } else {
if (wait_for_completion_timeout(&ctx->complete, retval = wait_for_sc_completion_timeout(oct, sc, 0);
msecs_to_jiffies(10000)) == 0) { if (retval)
dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", return retval;
__func__);
oct->speed_setting = 25;
oct->no_speed_setting = 1;
octeon_free_soft_command(oct, sc);
return -EINTR;
}
retval = resp->status; retval = resp->status;
if (retval) { if (retval) {
dev_err(&oct->pci_dev->dev, dev_err(&oct->pci_dev->dev,
"%s failed retval=%d\n", __func__, retval); "%s failed retval=%d\n", __func__, retval);
oct->no_speed_setting = 1;
oct->speed_setting = 25;
octeon_free_soft_command(oct, sc);
retval = -EIO; retval = -EIO;
} else { } else {
u32 var; u32 var;
...@@ -1664,16 +1601,23 @@ int liquidio_get_speed(struct lio *lio) ...@@ -1664,16 +1601,23 @@ int liquidio_get_speed(struct lio *lio)
var = be32_to_cpu((__force __be32)resp->speed); var = be32_to_cpu((__force __be32)resp->speed);
oct->speed_setting = var; oct->speed_setting = var;
if (var == 0xffff) { if (var == 0xffff) {
oct->no_speed_setting = 1;
/* unable to access boot variables /* unable to access boot variables
* get the default value based on the NIC type * get the default value based on the NIC type
*/ */
oct->speed_setting = 25; if (oct->subsystem_id ==
OCTEON_CN2350_25GB_SUBSYS_ID ||
oct->subsystem_id ==
OCTEON_CN2360_25GB_SUBSYS_ID) {
oct->no_speed_setting = 1;
oct->speed_setting = 25;
} else {
oct->speed_setting = 10;
}
} }
} }
WRITE_ONCE(sc->caller_is_done, true);
} }
octeon_free_soft_command(oct, sc);
return retval; return retval;
} }
...@@ -2969,30 +2969,15 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, ...@@ -2969,30 +2969,15 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
return 0; return 0;
} }
/* Soft-command completion callback for the OPCODE_NIC_SET_TRUSTED_VF
 * request.  Stores the firmware status in the request context and
 * wakes the caller waiting in liquidio_send_vf_trust_cmd().
 */
static void trusted_vf_callback(struct octeon_device *oct_dev,
				u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_trusted_vf_ctx *ctx;

	ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
	ctx->status = status;
	/* unblock the waiter on ctx->complete */
	complete(&ctx->complete);
}
static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
{ {
struct octeon_device *oct = lio->oct_dev; struct octeon_device *oct = lio->oct_dev;
struct lio_trusted_vf_ctx *ctx;
struct octeon_soft_command *sc; struct octeon_soft_command *sc;
int ctx_size, retval; int retval;
ctx_size = sizeof(struct lio_trusted_vf_ctx);
sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; sc = octeon_alloc_soft_command(oct, 0, 16, 0);
init_completion(&ctx->complete); if (!sc)
return -ENOMEM;
sc->iq_no = lio->linfo.txpciq[0].s.q_no; sc->iq_no = lio->linfo.txpciq[0].s.q_no;
...@@ -3001,23 +2986,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) ...@@ -3001,23 +2986,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
trusted); trusted);
sc->callback = trusted_vf_callback; init_completion(&sc->complete);
sc->callback_arg = sc; sc->sc_status = OCTEON_REQUEST_PENDING;
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct, sc); retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) { if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
retval = -1; retval = -1;
} else { } else {
/* Wait for response or timeout */ /* Wait for response or timeout */
if (wait_for_completion_timeout(&ctx->complete, retval = wait_for_sc_completion_timeout(oct, sc, 0);
msecs_to_jiffies(2000))) if (retval)
retval = ctx->status; return (retval);
else
retval = -1;
}
octeon_free_soft_command(oct, sc); WRITE_ONCE(sc->caller_is_done, true);
}
return retval; return retval;
} }
...@@ -3733,7 +3716,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) ...@@ -3733,7 +3716,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
octeon_dev->speed_setting = 10; octeon_dev->speed_setting = 10;
} }
octeon_dev->speed_boot = octeon_dev->speed_setting; octeon_dev->speed_boot = octeon_dev->speed_setting;
} }
devlink = devlink_alloc(&liquidio_devlink_ops, devlink = devlink_alloc(&liquidio_devlink_ops,
......
...@@ -49,44 +49,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { ...@@ -49,44 +49,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_change_mtu = lio_vf_rep_change_mtu, .ndo_change_mtu = lio_vf_rep_change_mtu,
}; };
/* Completion callback for VF-representor soft commands.
 * On any completion other than OCTEON_REQUEST_TIMEOUT, a non-zero
 * resp->status is cleared to zero — presumably the sender pre-sets a
 * busy sentinel there and polls it after waking (NOTE(review): confirm
 * against lio_vf_rep_send_soft_command, which treats non-zero status
 * as -EBUSY).  Finally wakes the sender blocked on ctx->complete.
 */
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
			    u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_vf_rep_sc_ctx *ctx =
		(struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	struct lio_vf_rep_resp *resp =
		(struct lio_vf_rep_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
		WRITE_ONCE(resp->status, 0);
	complete(&ctx->complete);
}
static int static int
lio_vf_rep_send_soft_command(struct octeon_device *oct, lio_vf_rep_send_soft_command(struct octeon_device *oct,
void *req, int req_size, void *req, int req_size,
void *resp, int resp_size) void *resp, int resp_size)
{ {
int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
struct octeon_soft_command *sc = NULL; struct octeon_soft_command *sc = NULL;
struct lio_vf_rep_resp *rep_resp; struct lio_vf_rep_resp *rep_resp;
struct lio_vf_rep_sc_ctx *ctx;
void *sc_req; void *sc_req;
int err; int err;
sc = (struct octeon_soft_command *) sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, req_size, octeon_alloc_soft_command(oct, req_size,
tot_resp_size, ctx_size); tot_resp_size, 0);
if (!sc) if (!sc)
return -ENOMEM; return -ENOMEM;
ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr; init_completion(&sc->complete);
memset(ctx, 0, ctx_size); sc->sc_status = OCTEON_REQUEST_PENDING;
init_completion(&ctx->complete);
sc_req = (struct lio_vf_rep_req *)sc->virtdptr; sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
memcpy(sc_req, req, req_size); memcpy(sc_req, req, req_size);
...@@ -98,23 +79,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct, ...@@ -98,23 +79,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct,
sc->iq_no = 0; sc->iq_no = 0;
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_VF_REP_CMD, 0, 0, 0); OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
sc->callback = lio_vf_rep_send_sc_complete;
sc->callback_arg = sc;
sc->wait_time = LIO_VF_REP_REQ_TMO_MS;
err = octeon_send_soft_command(oct, sc); err = octeon_send_soft_command(oct, sc);
if (err == IQ_SEND_FAILED) if (err == IQ_SEND_FAILED)
goto free_buff; goto free_buff;
wait_for_completion_timeout(&ctx->complete, err = wait_for_sc_completion_timeout(oct, sc, 0);
msecs_to_jiffies if (err)
(2 * LIO_VF_REP_REQ_TMO_MS)); return err;
err = READ_ONCE(rep_resp->status) ? -EBUSY : 0; err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
if (err) if (err)
dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
else if (resp)
if (resp)
memcpy(resp, (rep_resp + 1), resp_size); memcpy(resp, (rep_resp + 1), resp_size);
WRITE_ONCE(sc->caller_is_done, true);
return err;
free_buff: free_buff:
octeon_free_soft_command(oct, sc); octeon_free_soft_command(oct, sc);
......
...@@ -188,6 +188,72 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue, ...@@ -188,6 +188,72 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
remove_wait_queue(wait_queue, &we); remove_wait_queue(wait_queue, &we);
} }
/* Synchronously wait for a previously-sent soft command to complete.
 *
 * Input parameters:
 *  sc:      pointer to the soft command (request) to wait on.
 *  timeout: time, in milliseconds, the application is willing to wait
 *           for the response of the request.
 *           0 means wait until the response comes back from the
 *           firmware, within LIO_SC_MAX_TMO_MS milliseconds.  If the
 *           response does not return within LIO_SC_MAX_TMO_MS
 *           milliseconds, lio_process_ordered_list() will move the
 *           request to the zombie response list.
 *
 * Return value:
 *  0:      got the response from firmware for the sc request.
 *  -EINTR: the user aborted the command.
 *  -ETIME: the user-specified timeout value has expired.
 *  -EBUSY: the response of the request did not return in a
 *          reasonable time (LIO_SC_MAX_TMO_MS); the sc will be moved
 *          to the zombie response list by lio_process_ordered_list().
 *
 * For a request with a non-zero return value, sc->caller_is_done is
 * set here on the requestor's behalf.
 * For a request with a zero return value, the requestor must set
 * sc->caller_is_done after examining the response of the sc;
 * lio_process_ordered_list() will then free the soft command on
 * behalf of the soft-command requestor.
 * This avoids a race in which both the timeout path and
 * lio_process_ordered_list()/the callback free the same sc structure.
 */
static inline int
wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
			       struct octeon_soft_command *sc,
			       unsigned long timeout)
{
	/* was 'errno'; that identifier is reserved for the C library's
	 * errno macro (CERT DCL37-C), so use a plain local name
	 */
	int err = 0;
	long timeout_jiff;

	if (timeout)
		timeout_jiff = msecs_to_jiffies(timeout);
	else
		timeout_jiff = MAX_SCHEDULE_TIMEOUT;

	/* returns 0 on timeout, -ERESTARTSYS on signal, else jiffies left */
	timeout_jiff =
		wait_for_completion_interruptible_timeout(&sc->complete,
							  timeout_jiff);
	if (timeout_jiff == 0) {
		/* caller-specified timeout expired before completion */
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		err = -ETIME;
	} else if (timeout_jiff == -ERESTARTSYS) {
		/* wait was interrupted by a signal */
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		err = -EINTR;
	} else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
		/* completion fired, but the response processing marked the
		 * request as fatally timed out (zombie list)
		 */
		dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		err = -EBUSY;
	}

	return err;
}
#ifndef ROUNDUP4 #ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) #define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif #endif
......
...@@ -87,12 +87,6 @@ struct oct_nic_seapi_resp { ...@@ -87,12 +87,6 @@ struct oct_nic_seapi_resp {
u64 status; u64 status;
}; };
/* Per-request context for SEAPI control soft commands (speed set/get). */
struct liquidio_nic_seapi_ctl_context {
	int octeon_id;			/* device id, from lio_get_device_id() */
	u32 status;			/* status reported by the completion callback */
	struct completion complete;	/* signalled when the response arrives */
};
/** LiquidIO per-interface network private data */ /** LiquidIO per-interface network private data */
struct lio { struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */ /** State of the interface. Rx/Tx happens only in the RUNNING state. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment