Commit e33c2482 authored by Varun Prakash, committed by Martin K. Petersen

scsi: cxgb4i: Add support for iSCSI segmentation offload

T5/T6 adapters support iSCSI segmentation offload.

To transmit iSCSI PDUs using ISO, the driver provides the iSCSI header and a
data scatterlist to the adapter; the adapter forms multiple iSCSI PDUs and
transmits them.

[mkp: checkpatch]

Link: https://lore.kernel.org/r/1593448871-2972-1-git-send-email-varun@chelsio.com
Signed-off-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0a765665
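As a rough sketch of what the offload replaces (the values below are assumptions for illustration, not from this commit): without ISO the driver builds one skb per PDU; with ISO it builds a single skb carrying the iSCSI header plus a data scatterlist, and the adapter forms every PDU.

    /* Illustrative arithmetic only; mpdu and payload are assumed values. */
    u32 mpdu = 8192;                           /* negotiated MaxRecvDataSegmentLength */
    u32 payload = 32768;                       /* data queued for one ISO send */
    u32 num_pdu = DIV_ROUND_UP(payload, mpdu); /* 4 PDUs formed by the adapter */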
@@ -361,7 +361,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
 	/* len includes the length of any HW ULP additions */
 	req->len = htonl(len);
 	/* V_TX_ULP_SUBMODE sets both the mode and submode */
-	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
+	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
 			   V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
 	req->sndseq = htonl(csk->snd_nxt);
 	req->param = htonl(V_TX_PORT(l2t->smt_idx));
@@ -442,7 +442,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 			req_completion = 1;
 			csk->wr_una_cred = 0;
 		}
-		len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
+		len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
 		make_tx_data_wr(csk, skb, len, req_completion);
 		csk->snd_nxt += len;
 		cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
[Diffs for two more files in this commit are collapsed in this view.]
@@ -76,6 +76,14 @@ do { \
 #define ULP2_MAX_PDU_PAYLOAD \
 	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
 
+#define CXGBI_ULP2_MAX_ISO_PAYLOAD	65535
+
+#define CXGBI_MAX_ISO_DATA_IN_SKB	\
+	min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)
+
+#define cxgbi_is_iso_config(csk)	((csk)->cdev->skb_iso_txhdr)
+#define cxgbi_is_iso_disabled(csk)	((csk)->disable_iso)
+
 /*
  * For iscsi connections HW may inserts digest bytes into the pdu. Those digest
  * bytes are not sent by the host but are part of the TCP payload and therefore
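A worked example of the CXGBI_MAX_ISO_DATA_IN_SKB clamp added above, assuming 4 KiB pages (PAGE_SHIFT == 12) and MAX_SKB_FRAGS == 17:

    /* Worked example of the min_t() clamp (page size and frag count assumed). */
    u32 frag_cap = 17U << 12;                     /* 69632 bytes of page frags */
    u32 iso_cap = 65535;                          /* CXGBI_ULP2_MAX_ISO_PAYLOAD */
    u32 max_iso = min_t(u32, frag_cap, iso_cap);  /* clamps to 65535 */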
@@ -162,6 +170,10 @@ struct cxgbi_sock {
 	u32 write_seq;
 	u32 snd_win;
 	u32 rcv_win;
+
+	bool disable_iso;
+	u32 no_tx_credits;
+	unsigned long prev_iso_ts;
 };
 
 /*
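The three fields added to struct cxgbi_sock track per-connection ISO health. A hypothetical sketch of how they could interact; the threshold, cool-down, and helper name are illustrative assumptions, not the commit's actual policy:

    /* Hypothetical: back off from ISO while tx credits keep running out. */
    static void example_iso_backoff(struct cxgbi_sock *csk, bool no_credits)
    {
    	if (no_credits) {
    		/* repeated starvation: stop building large ISO skbs */
    		if (++csk->no_tx_credits >= 16) {
    			csk->disable_iso = true;
    			csk->prev_iso_ts = jiffies;
    		}
    	} else {
    		csk->no_tx_credits = 0;
    		/* retry ISO once a cool-down period has elapsed */
    		if (csk->disable_iso &&
    		    time_after(jiffies, csk->prev_iso_ts + HZ))
    			csk->disable_iso = false;
    	}
    }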
@@ -203,6 +215,8 @@ struct cxgbi_skb_tx_cb {
 	void *handle;
 	void *arp_err_handler;
 	struct sk_buff *wr_next;
+	u16 iscsi_hdr_len;
+	u8 ulp_mode;
 };
 
 enum cxgbi_skcb_flags {
@@ -218,6 +232,7 @@ enum cxgbi_skcb_flags {
 	SKCBF_RX_HCRC_ERR,	/* header digest error */
 	SKCBF_RX_DCRC_ERR,	/* data digest error */
 	SKCBF_RX_PAD_ERR,	/* padding byte error */
+	SKCBF_TX_ISO,		/* iso cpl in tx skb */
 };
 
 struct cxgbi_skb_cb {
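A hedged usage sketch for the new SKCBF_TX_ISO flag (assumed flow; the actual write-request code is in the collapsed diffs): the PDU writer tags an ISO skb so the transmit path can account for the extra CPL header, using the skb_iso_txhdr size added to struct cxgbi_device below:

    /* Illustrative: tag an ISO tx skb, then reserve room for its ISO CPL. */
    cxgbi_skcb_set_flag(skb, SKCBF_TX_ISO);
    /* ... later, while building the work request ... */
    if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
    	len += cdev->skb_iso_txhdr;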
@@ -225,18 +240,18 @@ struct cxgbi_skb_cb {
 		struct cxgbi_skb_rx_cb rx;
 		struct cxgbi_skb_tx_cb tx;
 	};
-	unsigned char ulp_mode;
 	unsigned long flags;
 	unsigned int seq;
 };
 
 #define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
 #define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
-#define cxgbi_skcb_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->ulp_mode)
 #define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
 #define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
 #define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
 #define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
+#define cxgbi_skcb_tx_iscsi_hdrlen(skb)	(CXGBI_SKB_CB(skb)->tx.iscsi_hdr_len)
+#define cxgbi_skcb_tx_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->tx.ulp_mode)
 
 static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
 				       enum cxgbi_skcb_flags flag)
@@ -458,6 +473,7 @@ struct cxgbi_ports_map {
 #define CXGBI_FLAG_IPV4_SET		0x10
 #define CXGBI_FLAG_USE_PPOD_OFLDQ	0x40
 #define CXGBI_FLAG_DDP_OFF		0x100
+#define CXGBI_FLAG_DEV_ISO_OFF		0x400
 
 struct cxgbi_device {
 	struct list_head list_head;
@@ -477,6 +493,7 @@ struct cxgbi_device {
 	unsigned int pfvf;
 	unsigned int rx_credit_thres;
 	unsigned int skb_tx_rsvd;
+	u32 skb_iso_txhdr;
 	unsigned int skb_rx_extra;	/* for msg coalesced mode */
 	unsigned int tx_max_size;
 	unsigned int rx_max_size;
@@ -523,20 +540,41 @@ struct cxgbi_endpoint {
 	struct cxgbi_sock *csk;
 };
 
-#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
 struct cxgbi_task_data {
+#define CXGBI_TASK_SGL_CHECKED	0x1
+#define CXGBI_TASK_SGL_COPY	0x2
+	u8 flags;
 	unsigned short nr_frags;
-	struct page_frag frags[MAX_PDU_FRAGS];
+	struct page_frag frags[MAX_SKB_FRAGS];
 	struct sk_buff *skb;
 	unsigned int dlen;
 	unsigned int offset;
 	unsigned int count;
 	unsigned int sgoffset;
+	u32 total_count;
+	u32 total_offset;
+	u32 max_xmit_dlength;
 	struct cxgbi_task_tag_info ttinfo;
 };
 
 #define iscsi_task_cxgbi_data(task) \
 	((task)->dd_data + sizeof(struct iscsi_tcp_task))
 
+struct cxgbi_iso_info {
+#define CXGBI_ISO_INFO_FSLICE		0x1
+#define CXGBI_ISO_INFO_LSLICE		0x2
+#define CXGBI_ISO_INFO_IMM_ENABLE	0x4
+	u8 flags;
+	u8 op;
+	u8 ahs;
+	u8 num_pdu;
+	u32 mpdu;
+	u32 burst_size;
+	u32 len;
+	u32 segment_offset;
+	u32 datasn_offset;
+	u32 buffer_offset;
+};
+
 static inline void *cxgbi_alloc_big_mem(unsigned int size,
 					gfp_t gfp)
 {
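The new struct cxgbi_iso_info describes one ISO slice handed to the hardware. A minimal sketch of how its fields relate, using a hypothetical helper; the FSLICE/LSLICE marking and the datasn_offset derivation are assumptions for illustration:

    /* Hypothetical helper: fill one ISO descriptor for a span of a burst. */
    static void example_fill_iso_info(struct cxgbi_iso_info *iso, u32 offset,
    				      u32 len, u32 burst_size, u32 mpdu)
    {
    	iso->flags = 0;
    	if (!offset)                            /* first slice of the burst */
    		iso->flags |= CXGBI_ISO_INFO_FSLICE;
    	if (offset + len == burst_size)         /* last slice of the burst */
    		iso->flags |= CXGBI_ISO_INFO_LSLICE;
    	iso->mpdu = mpdu;                       /* max data segment per PDU */
    	iso->len = len;                         /* payload bytes in this slice */
    	iso->num_pdu = DIV_ROUND_UP(len, mpdu); /* PDUs the adapter will form */
    	iso->burst_size = burst_size;           /* total bytes in the sequence */
    	iso->buffer_offset = offset;            /* Buffer Offset of first PDU */
    	iso->datasn_offset = offset / mpdu;     /* DataSN of the first PDU */
    }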