Commit c826a314 authored by Vasu Dev, committed by James Bottomley

[SCSI] fcoe: Out of order tx frames were causing several check condition SCSI status

frames, followed by these errors in the log:

	[sdp] Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE,SUGGEST_OK
	[sdp] Sense Key : Aborted Command [current]
	[sdp] Add. Sense: Data phase error

This was causing some test apps to exit due to write failure under heavy
load.

This was due to a race around adding and removing tx frame skbs in
fcoe_pending_queue. Chris Leech helped me find that the brief unlocked
period when pulling an skb from fcoe_pending_queue in various contexts
(fcoe_watchdog and fcoe_xmit), and then adding the skb back into
fcoe_pending_queue upon a failed fcoe_start_io, could change the skb/tx
frame order in fcoe_pending_queue. Thanks Chris.

This patch allows only a single context to pull skbs from fcoe_pending_queue
at any time, by use of the fcoe_pending_queue_active flag, to prevent the
ordering issue/race described above.

This patch also simplifies fcoe_watchdog by modifying fcoe_check_wait_queue
to use FCOE_LOW_QUEUE_DEPTH instead of the several conditionals previously
used to clear and set lp->qfull.

I think FCOE_MAX_QUEUE_DEPTH together with FCOE_LOW_QUEUE_DEPTH will work
better for setting and clearing lp->qfull, and both values could be fine-tuned
for performance.
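
As a small sketch of the high/low watermark idea behind these two thresholds
(update_qfull is a made-up helper for illustration only; in the driver the
two comparisons are made at different points, not in a single helper):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FCOE_MAX_QUEUE_DEPTH 256        /* backlog above this: set qfull */
#define FCOE_LOW_QUEUE_DEPTH 32         /* backlog below this: clear qfull */

/* qfull flips on above the high mark and only flips off again once the
 * backlog drains below the low mark, so it does not bounce around a
 * single threshold. */
static void update_qfull(unsigned int qlen, bool *qfull)
{
        if (qlen > FCOE_MAX_QUEUE_DEPTH)
                *qfull = true;
        else if (qlen < FCOE_LOW_QUEUE_DEPTH)
                *qfull = false;
        /* between the two marks: leave qfull unchanged */
}

int main(void)
{
        bool qfull = false;

        update_qfull(300, &qfull);      /* crosses the high mark */
        assert(qfull);
        update_qfull(100, &qfull);      /* inside the band: unchanged */
        assert(qfull);
        update_qfull(10, &qfull);       /* drained below the low mark */
        assert(!qfull);
        printf("qfull hysteresis behaves as expected\n");
        return 0;
}
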
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent e9041581
@@ -188,6 +188,7 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
         skb_queue_head_init(&fc->fcoe_pending_queue);
+        fc->fcoe_pending_queue_active = 0;
 
         /* setup Source Mac Address */
         memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
@@ -49,6 +49,7 @@
 static int debug_fcoe;
 
 #define FCOE_MAX_QUEUE_DEPTH 256
+#define FCOE_LOW_QUEUE_DEPTH 32
 
 /* destination address mode */
 #define FCOE_GW_ADDR_MODE 0x00
@@ -723,21 +724,12 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
  */
 void fcoe_watchdog(ulong vp)
 {
-        struct fc_lport *lp;
         struct fcoe_softc *fc;
-        int qfilled = 0;
 
         read_lock(&fcoe_hostlist_lock);
         list_for_each_entry(fc, &fcoe_hostlist, list) {
-                lp = fc->lp;
-                if (lp) {
-                        if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                                qfilled = 1;
-                        if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
-                                if (qfilled)
-                                        lp->qfull = 0;
-                        }
-                }
+                if (fc->lp)
+                        fcoe_check_wait_queue(fc->lp);
         }
         read_unlock(&fcoe_hostlist_lock);
@@ -753,8 +745,8 @@ void fcoe_watchdog(ulong vp)
  *
  * This empties the wait_queue, dequeue the head of the wait_queue queue
  * and calls fcoe_start_io() for each packet, if all skb have been
- * transmitted, return 0 if a error occurs, then restore wait_queue and
- * try again later.
+ * transmitted, return qlen or -1 if a error occurs, then restore
+ * wait_queue and try again later.
  *
  * The wait_queue is used when the skb transmit fails. skb will go
  * in the wait_queue which will be emptied by the time function OR
@@ -764,33 +756,38 @@ void fcoe_watchdog(ulong vp)
  */
 static int fcoe_check_wait_queue(struct fc_lport *lp)
 {
-        int rc;
         struct sk_buff *skb;
         struct fcoe_softc *fc;
+        int rc = -1;
 
         fc = lport_priv(lp);
         spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
-        /*
-         * if interface pending queue full then set qfull in lport.
-         */
-        if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                lp->qfull = 1;
+        if (fc->fcoe_pending_queue_active)
+                goto out;
+        fc->fcoe_pending_queue_active = 1;
         if (fc->fcoe_pending_queue.qlen) {
                 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
                         spin_unlock_bh(&fc->fcoe_pending_queue.lock);
                         rc = fcoe_start_io(skb);
-                        if (rc) {
+                        if (rc)
                                 fcoe_insert_wait_queue_head(lp, skb);
-                                return rc;
-                        }
                         spin_lock_bh(&fc->fcoe_pending_queue.lock);
+                        if (rc)
+                                break;
                 }
-                if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+                /*
+                 * if interface pending queue is below FCOE_LOW_QUEUE_DEPTH
+                 * then clear qfull flag.
+                 */
+                if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
                         lp->qfull = 0;
         }
+        fc->fcoe_pending_queue_active = 0;
+        rc = fc->fcoe_pending_queue.qlen;
+out:
         spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-        return fc->fcoe_pending_queue.qlen;
+        return rc;
 }
 
 /**
@@ -46,6 +46,7 @@ struct fcoe_softc {
         struct net_device *phys_dev;            /* device with ethtool_ops */
         struct packet_type fcoe_packet_type;
         struct sk_buff_head fcoe_pending_queue;
+        u8 fcoe_pending_queue_active;
 
         u8 dest_addr[ETH_ALEN];
         u8 ctl_src_addr[ETH_ALEN];