Commit abd49676 authored by Ram Amrani, committed by David S. Miller

qed: Add RoCE ll2 & GSI support

Add the RoCE-specific LL2 logic [as well as GSI support] over
the 'generic' LL2 interface.
Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee8eaea3
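For orientation, the patch exposes the RoCE LL2 path to an upper-layer RDMA driver through new qed_rdma_ops callbacks (roce_ll2_start, roce_ll2_stop, roce_ll2_tx, roce_ll2_post_rx_buffer, roce_ll2_set_mac_filter, roce_ll2_stats). A minimal consumer sketch follows; it is not part of the diff, and the qedr_* names, buffer counts and MTU are hypothetical.

/* Illustrative sketch only -- not part of this commit.
 * Assumes <linux/etherdevice.h> and the qed RDMA/RoCE interface headers.
 */
static void qedr_ll2_tx_cb(void *cookie, struct qed_roce_ll2_packet *pkt)
{
	/* unmap/free the GSI Tx buffers described by pkt */
}

static void qedr_ll2_rx_cb(void *cookie, struct qed_roce_ll2_packet *pkt,
			   struct qed_roce_ll2_rx_params *params)
{
	/* hand the received GSI frame (e.g. a CM MAD) to the RDMA stack */
}

static int qedr_start_gsi_ll2(struct qed_dev *cdev,
			      const struct qed_rdma_ops *ops,
			      const u8 *mac, void *drv_ctx)
{
	struct qed_roce_ll2_params params = { 0 };

	params.max_rx_buffers = 128;	/* illustrative sizing */
	params.max_tx_buffers = 128;
	params.mtu = 1500;
	params.cbs.tx_cb = qedr_ll2_tx_cb;
	params.cbs.rx_cb = qedr_ll2_rx_cb;
	params.cb_cookie = drv_ctx;
	ether_addr_copy(params.mac_address, mac);

	/* acquires + establishes the QED_LL2_TYPE_ROCE connection with
	 * gsi_enable set, and installs the MAC filter
	 */
	return ops->roce_ll2_start(cdev, &params);
}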
...@@ -437,6 +437,7 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
struct qed_roce_ll2_info *ll2;
};
struct pci_params {
...
...@@ -727,6 +727,9 @@ struct core_tx_bd_flags {
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
};
struct core_tx_bd {
...
...@@ -278,6 +278,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_tx_queue *p_tx;
dma_addr_t tx_frag;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
if (!p_ll2_conn)
...@@ -297,11 +298,22 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx->cur_completing_packet = *p_pkt;
p_tx->cur_completing_bd_idx = 1;
b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
qed_ll2b_complete_tx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt->cookie,
p_pkt->bds_set[0].tx_frag,
b_last_frag, b_last_packet);
}
}
...@@ -313,6 +325,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_tx_packet *p_pkt;
bool b_last_frag = false;
unsigned long flags;
dma_addr_t tx_frag;
int rc = -EINVAL;
spin_lock_irqsave(&p_tx->lock, flags);
...@@ -353,11 +366,19 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags);
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
p_pkt->bds_set[0].tx_frag,
b_last_frag, !num_bds);
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->gsi_enable)
qed_ll2b_complete_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag, !num_bds);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag, !num_bds);
spin_lock_irqsave(&p_tx->lock, flags);
}
...@@ -368,6 +389,54 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return rc;
}
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
union core_rx_cqe_union *p_cqe,
unsigned long lock_flags, bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
struct qed_ll2_rx_packet *p_pkt = NULL;
u16 packet_length, parse_flags, vlan;
u32 src_mac_addrhi;
u16 src_mac_addrlo;
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) {
DP_NOTICE(p_hwfn,
"GSI Rx completion but active_descq is empty\n");
return -EIO;
}
list_del(&p_pkt->list_entry);
parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
spin_unlock_irqrestore(&p_rx->lock, lock_flags);
qed_ll2b_complete_rx_gsi_packet(p_hwfn,
p_ll2_info->my_id,
p_pkt->cookie,
p_pkt->rx_buf_addr,
packet_length,
p_cqe->rx_cqe_gsi.data_length_error,
parse_flags,
vlan,
src_mac_addrhi,
src_mac_addrlo, b_last_cqe);
spin_lock_irqsave(&p_rx->lock, lock_flags);
return 0;
}
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
...@@ -429,6 +498,10 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
rc = -EINVAL;
break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
cqe, flags, b_last_cqe);
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
cqe, flags, b_last_cqe);
...@@ -527,6 +600,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
}
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
...@@ -589,6 +663,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
...@@ -775,6 +850,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
p_ll2_info->tx_dest = p_params->tx_dest;
p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
p_ll2_info->gsi_enable = p_params->gsi_enable;
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
if (rc)
...@@ -1026,6 +1102,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum core_roce_flavor_type type,
dma_addr_t first_frag,
u16 first_frag_len)
{
...@@ -1046,6 +1123,9 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
type);
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
...@@ -1137,11 +1217,13 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw)
{
struct qed_ll2_tx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn = NULL;
enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
unsigned long flags;
...@@ -1174,6 +1256,15 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out;
}
if (qed_roce_flavor == QED_LL2_ROCE) {
roce_flavor = CORE_ROCE;
} else if (qed_roce_flavor == QED_LL2_RROCE) {
roce_flavor = CORE_RROCE;
} else {
rc = -EINVAL;
goto out;
}
/* Prepare packet and BD, and perhaps send a doorbell to FW */
qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
num_of_bds, first_frag,
...@@ -1181,6 +1272,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
num_of_bds, CORE_TX_DEST_NW,
vlan, bd_flags, l4_hdr_offset_w,
roce_flavor,
first_frag, first_frag_len);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
...@@ -1476,6 +1568,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = 0;
ll2_info.tx_dest = CORE_TX_DEST_NW;
ll2_info.gsi_enable = 1;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
...@@ -1625,8 +1718,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
1 + skb_shinfo(skb)->nr_frags,
vlan, flags, 0, mapping,
skb->len, skb, 1);
vlan, flags, 0, 0 /* RoCE FLAVOR */,
mapping, skb->len, skb, 1);
if (rc)
goto err;
...
...@@ -24,6 +24,12 @@
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
enum qed_ll2_roce_flavor_type {
QED_LL2_ROCE,
QED_LL2_RROCE,
MAX_QED_LL2_ROCE_FLAVOR_TYPE
};
enum qed_ll2_conn_type {
QED_LL2_TYPE_RESERVED,
QED_LL2_TYPE_ISCSI,
...@@ -119,6 +125,7 @@ struct qed_ll2_info {
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
u8 gsi_enable;
};
/**
...@@ -199,6 +206,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw);
...@@ -285,5 +293,24 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
#endif
...@@ -64,6 +64,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
#include "qed_ll2.h"
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
...@@ -2611,6 +2612,306 @@ void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet)
{
struct qed_roce_ll2_packet *packet = cookie;
struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
}
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet)
{
qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
cookie, first_frag_addr,
b_last_fragment, b_last_packet);
}
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet)
{
struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
struct qed_roce_ll2_rx_params params;
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_roce_ll2_packet pkt;
DP_VERBOSE(cdev,
QED_MSG_LL2,
"roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
(void *)(uintptr_t)rx_buf_addr,
data_length, data_length_error);
memset(&pkt, 0, sizeof(pkt));
pkt.n_seg = 1;
pkt.payload[0].baddr = rx_buf_addr;
pkt.payload[0].len = data_length;
memset(&params, 0, sizeof(params));
params.vlan_id = vlan;
*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
if (data_length_error) {
DP_ERR(cdev,
"roce ll2 rx complete: data length error %d, length=%d\n",
data_length_error, data_length);
params.rc = -EINVAL;
}
roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
}
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
u8 *new_mac_address)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt;
int rc = 0;
if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev,
"qed roce mac filter failed - roce_info/ll2 NULL\n");
return -EINVAL;
}
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt) {
DP_ERR(cdev,
"qed roce ll2 mac filter set: failed to acquire PTT\n");
return -EINVAL;
}
mutex_lock(&hwfn->ll2->lock);
if (old_mac_address)
qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
old_mac_address);
if (new_mac_address)
rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
new_mac_address);
mutex_unlock(&hwfn->ll2->lock);
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
if (rc)
DP_ERR(cdev,
"qed roce ll2 mac filter set: failed to add mac filter\n");
return rc;
}
static int qed_roce_ll2_start(struct qed_dev *cdev,
struct qed_roce_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2;
struct qed_ll2_info ll2_params;
int rc;
if (!params) {
DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
return -EINVAL;
}
if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
DP_ERR(cdev,
"qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
params->cbs.tx_cb, params->cbs.rx_cb);
return -EINVAL;
}
if (!is_valid_ether_addr(params->mac_address)) {
DP_ERR(cdev,
"qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
params->mac_address);
return -EINVAL;
}
/* Initialize */
roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
if (!roce_ll2) {
DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
return -ENOMEM;
}
memset(roce_ll2, 0, sizeof(*roce_ll2));
roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
roce_ll2->cbs = params->cbs;
roce_ll2->cb_cookie = params->cb_cookie;
mutex_init(&roce_ll2->lock);
memset(&ll2_params, 0, sizeof(ll2_params));
ll2_params.conn_type = QED_LL2_TYPE_ROCE;
ll2_params.mtu = params->mtu;
ll2_params.rx_drop_ttl0_flg = true;
ll2_params.rx_vlan_removal_en = false;
ll2_params.tx_dest = CORE_TX_DEST_NW;
ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
ll2_params.gsi_enable = true;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
params->max_rx_buffers,
params->max_tx_buffers,
&roce_ll2->handle);
if (rc) {
DP_ERR(cdev,
"qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
rc);
goto err;
}
rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
roce_ll2->handle);
if (rc) {
DP_ERR(cdev,
"qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
rc);
goto err1;
}
hwfn->ll2 = roce_ll2;
rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
if (rc) {
hwfn->ll2 = NULL;
goto err2;
}
ether_addr_copy(roce_ll2->mac_address, params->mac_address);
return 0;
err2:
qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err1:
qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err:
kfree(roce_ll2);
return rc;
}
static int qed_roce_ll2_stop(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
if (!cdev) {
DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
return -EINVAL;
}
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
}
/* remove LL2 MAC address filter */
rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
eth_zero_addr(roce_ll2->mac_address);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
roce_ll2->handle);
if (rc)
DP_ERR(cdev,
"qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
rc);
qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
kfree(roce_ll2);
return rc;
}
static int qed_roce_ll2_tx(struct qed_dev *cdev,
struct qed_roce_ll2_packet *pkt,
struct qed_roce_ll2_tx_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
enum qed_ll2_roce_flavor_type qed_roce_flavor;
u8 flags = 0;
int rc;
int i;
if (!cdev || !pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
return -EINVAL;
}
qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
1 + pkt->n_seg, 0, flags, 0,
qed_roce_flavor, pkt->header.baddr,
pkt->header.len, pkt, 1);
if (rc) {
DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
return QED_ROCE_TX_HEAD_FAILURE;
}
/* Tx payload */
for (i = 0; i < pkt->n_seg; i++) {
rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
roce_ll2->handle,
pkt->payload[i].baddr,
pkt->payload[i].len);
if (rc) {
/* If this fails there is not much to do: a partial packet has
 * already been posted and we cannot free its memory, so we must
 * wait for the completion.
 */
DP_ERR(cdev,
"roce ll2 tx: payload failed (rc=%d)\n", rc);
return QED_ROCE_TX_FRAG_FAILURE;
}
}
return 0;
}
static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
struct qed_roce_ll2_buffer *buf,
u64 cookie, u8 notify_fw)
{
return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->ll2->handle,
buf->baddr, buf->len,
(void *)(uintptr_t)cookie, notify_fw);
}
static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
roce_ll2->handle, stats);
}
static const struct qed_rdma_ops qed_rdma_ops_pass = {
.common = &qed_common_ops_pass,
.fill_dev_info = &qed_fill_rdma_dev_info,
...@@ -2638,6 +2939,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_free_tid = &qed_rdma_free_tid,
.rdma_register_tid = &qed_rdma_register_tid,
.rdma_deregister_tid = &qed_rdma_deregister_tid,
.roce_ll2_start = &qed_roce_ll2_start,
.roce_ll2_stop = &qed_roce_ll2_stop,
.roce_ll2_tx = &qed_roce_ll2_tx,
.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
.roce_ll2_stats = &qed_roce_ll2_stats,
};
const struct qed_rdma_ops *qed_get_rdma_ops()
...
...@@ -42,6 +42,7 @@
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_ll2.h"
#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
#define QED_RDMA_MAX_P_KEY (1)
...
...@@ -39,6 +39,16 @@
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>
enum qed_roce_ll2_tx_dest {
/* Light L2 TX Destination to the Network */
QED_ROCE_LL2_TX_DEST_NW,
/* Light L2 TX Destination to the Loopback */
QED_ROCE_LL2_TX_DEST_LB,
QED_ROCE_LL2_TX_DEST_MAX
};
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
...@@ -461,6 +471,61 @@ struct qed_rdma_counters_out_params {
#define QED_ROCE_TX_HEAD_FAILURE (1)
#define QED_ROCE_TX_FRAG_FAILURE (2)
struct qed_roce_ll2_header {
void *vaddr;
dma_addr_t baddr;
size_t len;
};
struct qed_roce_ll2_buffer {
dma_addr_t baddr;
size_t len;
};
struct qed_roce_ll2_packet {
struct qed_roce_ll2_header header;
int n_seg;
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
enum qed_roce_ll2_tx_dest tx_dest;
};
struct qed_roce_ll2_tx_params {
int reserved;
};
struct qed_roce_ll2_rx_params {
u16 vlan_id;
u8 smac[ETH_ALEN];
int rc;
};
struct qed_roce_ll2_cbs {
void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
struct qed_roce_ll2_rx_params *params);
};
struct qed_roce_ll2_params {
u16 max_rx_buffers;
u16 max_tx_buffers;
u16 mtu;
u8 mac_address[ETH_ALEN];
struct qed_roce_ll2_cbs cbs;
void *cb_cookie;
};
struct qed_roce_ll2_info {
u8 handle;
struct qed_roce_ll2_cbs cbs;
u8 mac_address[ETH_ALEN];
void *cb_cookie;
/* Lock to protect ll2 */
struct mutex lock;
};
enum qed_rdma_type {
QED_RDMA_TYPE_ROCE,
};
...@@ -518,6 +583,20 @@ struct qed_rdma_ops {
int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
int (*roce_ll2_start)(struct qed_dev *cdev,
struct qed_roce_ll2_params *params);
int (*roce_ll2_stop)(struct qed_dev *cdev);
int (*roce_ll2_tx)(struct qed_dev *cdev,
struct qed_roce_ll2_packet *packet,
struct qed_roce_ll2_tx_params *params);
int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
struct qed_roce_ll2_buffer *buf,
u64 cookie, u8 notify_fw);
int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
u8 *old_mac_address,
u8 *new_mac_address);
int (*roce_ll2_stats)(struct qed_dev *cdev,
struct qed_ll2_stats *stats);
};
const struct qed_rdma_ops *qed_get_rdma_ops(void);
...
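To round out the picture, a GSI transmit through the new interface would look roughly like the sketch below. It is not part of the commit; qedr_gsi_send and its DMA-address parameters are hypothetical, but the structures and the roce_ll2_tx op are the ones added above (the op posts the header BD and then each payload fragment on the LL2 Tx queue, deriving the RoCE BD flavor from roce_mode).

/* Hypothetical sketch only -- not part of this commit. */
static int qedr_gsi_send(struct qed_dev *cdev, const struct qed_rdma_ops *ops,
			 dma_addr_t hdr_dma, size_t hdr_len,
			 dma_addr_t data_dma, size_t data_len)
{
	struct qed_roce_ll2_tx_params tx_params;
	struct qed_roce_ll2_packet pkt;

	memset(&tx_params, 0, sizeof(tx_params));
	memset(&pkt, 0, sizeof(pkt));
	pkt.header.baddr = hdr_dma;
	pkt.header.len = hdr_len;
	pkt.n_seg = 1;
	pkt.payload[0].baddr = data_dma;
	pkt.payload[0].len = data_len;
	pkt.roce_mode = ROCE_V1;	/* maps to the CORE_ROCE BD flavor */

	/* posts one header BD plus one fragment; tx_cb fires on completion */
	return ops->roce_ll2_tx(cdev, &pkt, &tx_params);
}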