Commit 8597ae8b authored by Bhanu Prakash Gollapudi, committed by James Bottomley

[SCSI] libfcoe: Move common code from fcoe to libfcoe module

To facilitate code reuse by LLDDs, the skb-queue-related functions are moved to
libfcoe so that both the fcoe and bnx2fc drivers can use them. The common structures
fcoe_port and fcoe_percpu_s are moved to libfcoe as well. fcoe_port now carries an
opaque pointer that points to the corresponding driver's interface structure.
fcoe_start_io() and fcoe_fc_crc() are also moved to libfcoe.

As part of this change, fcoe_start_io() is fixed to return -ENOMEM if
skb_clone() fails.
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 2ca32b48
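
Editor's note: the sketch below is not part of the commit. It illustrates, under stated assumptions, how an LLDD might initialize the now-shared struct fcoe_port after this change. The names my_fcoe_interface and my_fcoe_port_init are hypothetical stand-ins for driver-private code (such as fcoe or bnx2fc), and the queue-depth values simply mirror the fcoe driver's FCOE_MAX_QUEUE_DEPTH/FCOE_MIN_QUEUE_DEPTH defaults shown in the diff.

/* Hypothetical sketch; my_fcoe_interface and my_fcoe_port_init stand in
 * for driver-private code such as fcoe or bnx2fc. */
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

struct my_fcoe_interface {
	struct net_device *netdev;
	/* ... other driver-private state ... */
};

static void my_fcoe_port_init(struct fc_lport *lport,
			      struct my_fcoe_interface *iface)
{
	struct fcoe_port *port = lport_priv(lport);

	port->lport = lport;
	port->priv = iface;		/* opaque back-pointer to the LLDD's interface */
	port->max_queue_depth = 256;	/* assumed: same as fcoe's FCOE_MAX_QUEUE_DEPTH */
	port->min_queue_depth = 32;	/* assumed: same as fcoe's FCOE_MIN_QUEUE_DEPTH */
	skb_queue_head_init(&port->fcoe_pending_queue);
	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
}

Because fcoe_port keeps only a void *priv, libfcoe never needs to know the layout of the driver's interface structure; each LLDD casts priv back to its own type, as fcoe_netdev() now does in the diff below.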
[Note: part of the diff is collapsed and not shown.]
@@ -24,7 +24,7 @@
 #include <linux/kthread.h>
 #define FCOE_MAX_QUEUE_DEPTH 256
-#define FCOE_LOW_QUEUE_DEPTH 32
+#define FCOE_MIN_QUEUE_DEPTH 32
 #define FCOE_WORD_TO_BYTE 4
@@ -70,21 +70,6 @@ do { \
 	printk(KERN_INFO "fcoe: %s: " fmt, \
 	       netdev->name, ##args);)
-/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread: The thread context
- * @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- *                  memory for a new trailer
- */
-struct fcoe_percpu_s {
-	struct task_struct *thread;
-	struct sk_buff_head fcoe_rx_list;
-	struct page *crc_eof_page;
-	int crc_eof_offset;
-};
 /**
  * struct fcoe_interface - A FCoE interface
  * @list: Handle for a list of FCoE interfaces
@@ -108,30 +93,6 @@ struct fcoe_interface {
 	struct kref kref;
 };
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe: The associated fcoe interface
- * @lport: The associated local port
- * @fcoe_pending_queue: The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer: The queue timer
- * @destroy_work: Handle for work context
- *                (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
-	struct fcoe_interface *fcoe;
-	struct fc_lport *lport;
-	struct sk_buff_head fcoe_pending_queue;
-	u8 fcoe_pending_queue_active;
-	struct timer_list timer;
-	struct work_struct destroy_work;
-	u8 data_src_addr[ETH_ALEN];
-};
 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
 /**
@@ -140,7 +101,8 @@ struct fcoe_port {
  */
 static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
 {
-	return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+	return ((struct fcoe_interface *)
+		((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 #endif /* _FCOE_H_ */
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/errno.h>
+#include <linux/crc32.h>
 #include <scsi/libfcoe.h>
 #include "libfcoe.h"
@@ -74,6 +75,205 @@ module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(disable, "string");
 MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses the crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+	struct sk_buff *skb = fp_skb(fp);
+	struct skb_frag_struct *frag;
+	unsigned char *data;
+	unsigned long off, len, clen;
+	u32 crc;
+	unsigned i;
+
+	crc = crc32(~0, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		off = frag->page_offset;
+		len = frag->size;
+		while (len > 0) {
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+					   KM_SKB_DATA_SOFTIRQ);
+			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+			off += clen;
+			len -= clen;
+		}
+	}
+	return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+	int rc;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+	rc = dev_queue_xmit(nskb);
+	if (rc != 0)
+		return rc;
+	kfree_skb(skb);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
+/**
+ * fcoe_clean_pending_queue() - Dequeue a skb and free it
+ * @lport: The local port to dequeue a skb on
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct sk_buff *skb;
+
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
+		kfree_skb(skb);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
+	}
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ *
+ * This empties the wait_queue, dequeues the head of the wait_queue queue
+ * and calls fcoe_start_io() for each packet. If all skb have been
+ * transmitted it returns the qlen. If an error occurs it restores
+ * wait_queue (to try again later) and returns -1.
+ *
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	int rc;
+
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+	if (skb)
+		__skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+	if (port->fcoe_pending_queue_active)
+		goto out;
+	port->fcoe_pending_queue_active = 1;
+
+	while (port->fcoe_pending_queue.qlen) {
+		/* keep qlen > 0 until fcoe_start_io succeeds */
+		port->fcoe_pending_queue.qlen++;
+		skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
+		rc = fcoe_start_io(skb);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+		if (rc) {
+			__skb_queue_head(&port->fcoe_pending_queue, skb);
+			/* undo temporary increment above */
+			port->fcoe_pending_queue.qlen--;
+			break;
+		}
+
+		/* undo temporary increment above */
+		port->fcoe_pending_queue.qlen--;
+	}
+
+	if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+		lport->qfull = 0;
+	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+		mod_timer(&port->timer, jiffies + 2);
+	port->fcoe_pending_queue_active = 0;
+out:
+	if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+		lport->qfull = 1;
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough buffer left a new page is allocated for the trailer. Reference to
+ * the page from this function as well as the skbs using the page fragments
+ * ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+			   struct fcoe_percpu_s *fps)
+{
+	struct page *page;
+
+	page = fps->crc_eof_page;
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC);
+		if (!page)
+			return -ENOMEM;
+
+		fps->crc_eof_page = page;
+		fps->crc_eof_offset = 0;
+	}
+
+	get_page(page);
+	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+			   fps->crc_eof_offset, tlen);
+	skb->len += tlen;
+	skb->data_len += tlen;
+	skb->truesize += tlen;
+	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+	if (fps->crc_eof_offset >= PAGE_SIZE) {
+		fps->crc_eof_page = NULL;
+		fps->crc_eof_offset = 0;
+		put_page(page);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
 /**
  * fcoe_transport_lookup - find an fcoe transport that matches a netdev
  * @netdev: The netdev to look for from all attached transports
 ...
@@ -221,6 +221,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
 u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
 int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
		       const struct libfc_function_template *, int init_fcp);
+u32 fcoe_fc_crc(struct fc_frame *fp);
+int fcoe_start_io(struct sk_buff *skb);
 /**
  * is_fip_mode() - returns true if FIP mode selected.
@@ -266,6 +268,55 @@ struct fcoe_transport {
	int (*disable) (struct net_device *device);
 };
+/**
+ * struct fcoe_percpu_s - The context for FCoE receive thread(s)
+ * @thread: The thread context
+ * @fcoe_rx_list: The queue of pending packets to process
+ * @page: The memory page for calculating frame trailer CRCs
+ * @crc_eof_offset: The offset into the CRC page pointing to available
+ *                  memory for a new trailer
+ */
+struct fcoe_percpu_s {
+	struct task_struct *thread;
+	struct sk_buff_head fcoe_rx_list;
+	struct page *crc_eof_page;
+	int crc_eof_offset;
+};
+
+/**
+ * struct fcoe_port - The FCoE private structure
+ * @priv: The associated fcoe interface. The structure is
+ *        defined by the low level driver
+ * @lport: The associated local port
+ * @fcoe_pending_queue: The pending Rx queue of skbs
+ * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @max_queue_depth: Max queue depth of pending queue
+ * @min_queue_depth: Min queue depth of pending queue
+ * @timer: The queue timer
+ * @destroy_work: Handle for work context
+ *                (to prevent RTNL deadlocks)
+ * @data_src_addr: Source address for data
+ *
+ * An instance of this structure is to be allocated along with the
+ * Scsi_Host and libfc fc_lport structures.
+ */
+struct fcoe_port {
+	void *priv;
+	struct fc_lport *lport;
+	struct sk_buff_head fcoe_pending_queue;
+	u8 fcoe_pending_queue_active;
+	u32 max_queue_depth;
+	u32 min_queue_depth;
+	struct timer_list timer;
+	struct work_struct destroy_work;
+	u8 data_src_addr[ETH_ALEN];
+};
+
+void fcoe_clean_pending_queue(struct fc_lport *);
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
+void fcoe_queue_timer(ulong lport);
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+			   struct fcoe_percpu_s *fps);
 /**
  * struct netdev_list
  * A mapping from netdevice to fcoe_transport
 ...
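
Editor's note: as a closing illustration (not part of the commit), the sketch below shows roughly how a driver's transmit path and teardown could consume the helpers exported above. The names my_fcoe_send and my_fcoe_destroy are hypothetical; the queue-then-retry pattern is an assumption modeled on the backlog handling that fcoe_check_wait_queue() and fcoe_queue_timer() provide.

/* Hypothetical sketch of an LLDD consuming the exported libfcoe helpers. */
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

static void my_fcoe_send(struct fc_lport *lport, struct sk_buff *skb)
{
	struct fcoe_port *port = lport_priv(lport);

	if (port->fcoe_pending_queue.qlen)
		/* A backlog already exists: append to it to preserve ordering. */
		fcoe_check_wait_queue(lport, skb);
	else if (fcoe_start_io(skb))
		/* Transmit failed: queue the skb; the timer will retry it. */
		fcoe_check_wait_queue(lport, skb);
}

static void my_fcoe_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);

	fcoe_clean_pending_queue(lport);	/* drop any queued skbs */
	del_timer_sync(&port->timer);		/* stop the retry timer */
}

Because fcoe_start_io() returns non-zero on failure without freeing the caller's skb, the failed skb can safely be handed to fcoe_check_wait_queue() for a later retry.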