Commit 87a9404e authored by Neil Horman, committed by Greg Kroah-Hartman

staging: unisys: Clean up kthread usage

Remove the has_stopped completion, as there is already one available
internally (kthread_stop() waits for the thread to exit).

Correct the while loops to check kthread_should_stop().

Remove the while loop in drain_queue, as the loop already exists at the
top level.
Signed-off-by: Neil Horman <nhorman@redhat.com>
Signed-off-by: Benjamin Romer <benjamin.romer@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 513e1cbd
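
For background on the cleanup: the stock kthread API already provides the synchronization this driver was duplicating. kthread_stop() blocks until the thread function returns (the completion that is "already available internally"), and kthread_should_stop() is the flag a thread polls to know it must exit. The sketch below is a minimal, self-contained illustration of that lifecycle; the demo_* names are invented here and are not part of the patch, but the visor_thread_start()/visor_thread_stop() changes below follow the same pattern.

/* Illustrative module only -- not part of the patch. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_threadfn(void *data)
{
        /* Run until kthread_stop() asks us to exit. */
        while (!kthread_should_stop()) {
                /* ... remove and process one queued item here ... */
                msleep_interruptible(100);
        }

        /*
         * Just return; kthread_stop() blocks until this function returns,
         * so no driver-private has_stopped completion is required.
         */
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_threadfn, NULL, "demo_thread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        /* Sets the stop flag, wakes the thread, and waits for it to exit. */
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
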
@@ -91,7 +91,6 @@ static struct visor_driver visornic_driver = {

 struct visor_thread_info {
         struct task_struct *task;
-        struct completion has_stopped;
         int id;
 };

...@@ -317,7 +316,6 @@ static int visor_thread_start(struct visor_thread_info *thrinfo, ...@@ -317,7 +316,6 @@ static int visor_thread_start(struct visor_thread_info *thrinfo,
void *thrcontext, char *name) void *thrcontext, char *name)
{ {
/* used to stop the thread */ /* used to stop the thread */
init_completion(&thrinfo->has_stopped);
thrinfo->task = kthread_run(threadfn, thrcontext, name); thrinfo->task = kthread_run(threadfn, thrcontext, name);
if (IS_ERR(thrinfo->task)) { if (IS_ERR(thrinfo->task)) {
pr_debug("%s failed (%ld)\n", pr_debug("%s failed (%ld)\n",
@@ -341,10 +339,8 @@ static void visor_thread_stop(struct visor_thread_info *thrinfo)
 {
         if (!thrinfo->id)
                 return; /* thread not running */
-        kthread_stop(thrinfo->task);
-        /* give up if the thread has NOT died in 1 minute */
-        if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
-                thrinfo->id = 0;
+        BUG_ON(kthread_stop(thrinfo->task));
+        thrinfo->id = 0;
 }

 static ssize_t enable_ints_write(struct file *file,
@@ -1691,99 +1687,95 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
         unsigned long flags;
         struct net_device *netdev;

-        /* drain queue */
-        while (1) {
-                /* TODO: CLIENT ACQUIRE -- Don't really need this at the
-                 * moment */
-                if (!visorchannel_signalremove(devdata->dev->visorchannel,
-                                               IOCHAN_FROM_IOPART,
-                                               cmdrsp))
-                        break; /* queue empty */
-
-                switch (cmdrsp->net.type) {
-                case NET_RCV:
-                        devdata->chstat.got_rcv++;
-                        /* process incoming packet */
-                        visornic_rx(cmdrsp);
-                        break;
-                case NET_XMIT_DONE:
-                        spin_lock_irqsave(&devdata->priv_lock, flags);
-                        devdata->chstat.got_xmit_done++;
-                        if (cmdrsp->net.xmtdone.xmt_done_result)
-                                devdata->chstat.xmit_fail++;
-                        /* only call queue wake if we stopped it */
-                        netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
-                        /* ASSERT netdev == vnicinfo->netdev; */
-                        if ((netdev == devdata->netdev) &&
-                            netif_queue_stopped(netdev)) {
-                                /* check to see if we have crossed
-                                 * the lower watermark for
-                                 * netif_wake_queue()
-                                 */
-                                if (((devdata->chstat.sent_xmit >=
-                                    devdata->chstat.got_xmit_done) &&
-                                    (devdata->chstat.sent_xmit -
-                                    devdata->chstat.got_xmit_done <=
-                                    devdata->lower_threshold_net_xmits)) ||
-                                    ((devdata->chstat.sent_xmit <
-                                    devdata->chstat.got_xmit_done) &&
-                                    (ULONG_MAX - devdata->chstat.got_xmit_done
-                                    + devdata->chstat.sent_xmit <=
-                                    devdata->lower_threshold_net_xmits))) {
-                                        /* enough NET_XMITs completed
-                                         * so can restart netif queue
-                                         */
-                                        netif_wake_queue(netdev);
-                                        devdata->flow_control_lower_hits++;
-                                }
-                        }
-                        skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
-                        spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                        kfree_skb(cmdrsp->net.buf);
-                        break;
-                case NET_RCV_ENBDIS_ACK:
-                        devdata->chstat.got_enbdisack++;
-                        netdev = (struct net_device *)
-                        cmdrsp->net.enbdis.context;
-                        spin_lock_irqsave(&devdata->priv_lock, flags);
-                        devdata->enab_dis_acked = 1;
-                        spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-                        if (devdata->server_down &&
-                            devdata->server_change_state) {
-                                /* Inform Linux that the link is up */
-                                devdata->server_down = false;
-                                devdata->server_change_state = false;
-                                netif_wake_queue(netdev);
-                                netif_carrier_on(netdev);
-                        }
-                        break;
-                case NET_CONNECT_STATUS:
-                        netdev = devdata->netdev;
-                        if (cmdrsp->net.enbdis.enable == 1) {
-                                spin_lock_irqsave(&devdata->priv_lock, flags);
-                                devdata->enabled = cmdrsp->net.enbdis.enable;
-                                spin_unlock_irqrestore(&devdata->priv_lock,
-                                                       flags);
-                                netif_wake_queue(netdev);
-                                netif_carrier_on(netdev);
-                        } else {
-                                netif_stop_queue(netdev);
-                                netif_carrier_off(netdev);
-                                spin_lock_irqsave(&devdata->priv_lock, flags);
-                                devdata->enabled = cmdrsp->net.enbdis.enable;
-                                spin_unlock_irqrestore(&devdata->priv_lock,
-                                                       flags);
-                        }
-                        break;
-                default:
-                        break;
-                }
-                /* cmdrsp is now available for reuse */
-                if (kthread_should_stop())
-                        break;
-        }
+        /* TODO: CLIENT ACQUIRE -- Don't really need this at the
+         * moment */
+        if (!visorchannel_signalremove(devdata->dev->visorchannel,
+                                       IOCHAN_FROM_IOPART,
+                                       cmdrsp))
+                return; /* queue empty */
+
+        switch (cmdrsp->net.type) {
+        case NET_RCV:
+                devdata->chstat.got_rcv++;
+                /* process incoming packet */
+                visornic_rx(cmdrsp);
+                break;
+        case NET_XMIT_DONE:
+                spin_lock_irqsave(&devdata->priv_lock, flags);
+                devdata->chstat.got_xmit_done++;
+                if (cmdrsp->net.xmtdone.xmt_done_result)
+                        devdata->chstat.xmit_fail++;
+                /* only call queue wake if we stopped it */
+                netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
+                /* ASSERT netdev == vnicinfo->netdev; */
+                if ((netdev == devdata->netdev) &&
+                    netif_queue_stopped(netdev)) {
+                        /* check to see if we have crossed
+                         * the lower watermark for
+                         * netif_wake_queue()
+                         */
+                        if (((devdata->chstat.sent_xmit >=
+                            devdata->chstat.got_xmit_done) &&
+                            (devdata->chstat.sent_xmit -
+                            devdata->chstat.got_xmit_done <=
+                            devdata->lower_threshold_net_xmits)) ||
+                            ((devdata->chstat.sent_xmit <
+                            devdata->chstat.got_xmit_done) &&
+                            (ULONG_MAX - devdata->chstat.got_xmit_done
+                            + devdata->chstat.sent_xmit <=
+                            devdata->lower_threshold_net_xmits))) {
+                                /* enough NET_XMITs completed
+                                 * so can restart netif queue
+                                 */
+                                netif_wake_queue(netdev);
+                                devdata->flow_control_lower_hits++;
+                        }
+                }
+                skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
+                spin_unlock_irqrestore(&devdata->priv_lock, flags);
+                kfree_skb(cmdrsp->net.buf);
+                break;
+        case NET_RCV_ENBDIS_ACK:
+                devdata->chstat.got_enbdisack++;
+                netdev = (struct net_device *)
+                cmdrsp->net.enbdis.context;
+                spin_lock_irqsave(&devdata->priv_lock, flags);
+                devdata->enab_dis_acked = 1;
+                spin_unlock_irqrestore(&devdata->priv_lock, flags);
+                if (kthread_should_stop())
+                        break;
+                if (devdata->server_down &&
+                    devdata->server_change_state) {
+                        /* Inform Linux that the link is up */
+                        devdata->server_down = false;
+                        devdata->server_change_state = false;
+                        netif_wake_queue(netdev);
+                        netif_carrier_on(netdev);
+                }
+                break;
+        case NET_CONNECT_STATUS:
+                netdev = devdata->netdev;
+                if (cmdrsp->net.enbdis.enable == 1) {
+                        spin_lock_irqsave(&devdata->priv_lock, flags);
+                        devdata->enabled = cmdrsp->net.enbdis.enable;
+                        spin_unlock_irqrestore(&devdata->priv_lock,
+                                               flags);
+                        netif_wake_queue(netdev);
+                        netif_carrier_on(netdev);
+                } else {
+                        netif_stop_queue(netdev);
+                        netif_carrier_off(netdev);
+                        spin_lock_irqsave(&devdata->priv_lock, flags);
+                        devdata->enabled = cmdrsp->net.enbdis.enable;
+                        spin_unlock_irqrestore(&devdata->priv_lock,
+                                               flags);
+                }
+                break;
+        default:
+                break;
+        }
+        /* cmdrsp is now available for reuse */
 }

 /**
@@ -1803,9 +1795,9 @@ process_incoming_rsps(void *v)

         cmdrsp = kmalloc(SZ, GFP_ATOMIC);
         if (!cmdrsp)
-                complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+                return 0;

-        while (1) {
+        while (!kthread_should_stop()) {
                 wait_event_interruptible_timeout(
                         devdata->rsp_queue, (atomic_read(
                                 &devdata->interrupt_rcvd) == 1),
@@ -1818,12 +1810,10 @@ process_incoming_rsps(void *v)
                 atomic_set(&devdata->interrupt_rcvd, 0);
                 send_rcv_posts_if_needed(devdata);
                 drain_queue(cmdrsp, devdata);
-                if (kthread_should_stop())
-                        break;
         }

         kfree(cmdrsp);
-        complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+        return 0;
 }

 /**
......