Commit 10b4f094 authored by SrujanaChalla's avatar SrujanaChalla Committed by Herbert Xu

crypto: marvell - add the Virtual Function driver for CPT

Add support for the cryptographic accelerator unit virtual functions on
OcteonTX 83XX SoC.
Co-developed-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: SrujanaChalla <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent d9110b0b
# SPDX-License-Identifier: GPL-2.0
# Build the OcteonTX CPT physical-function and virtual-function drivers.
# (The duplicated pre-patch obj- line left over from the diff rendering
# has been dropped; octeontx-cpt.o must be listed exactly once.)
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx-cpt.o octeontx-cptvf.o
octeontx-cpt-objs := otx_cptpf_main.o otx_cptpf_mbox.o otx_cptpf_ucode.o
octeontx-cptvf-objs := otx_cptvf_main.o otx_cptvf_mbox.o otx_cptvf_reqmgr.o \
		       otx_cptvf_algs.o
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPTVF_H
#define __OTX_CPTVF_H
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include "otx_cpt_common.h"
#include "otx_cptvf_reqmgr.h"
/* Flags to indicate the features supported */
#define OTX_CPT_FLAG_DEVICE_READY BIT(1)

/* Test whether the DEVICE_READY flag is set in cpt->flags */
#define otx_cpt_device_ready(cpt) ((cpt)->flags & OTX_CPT_FLAG_DEVICE_READY)

/* Default command queue length */
#define OTX_CPT_CMD_QLEN (4*2046)
/* Command queue chunk size (entries per chunk) */
#define OTX_CPT_CMD_QCHUNK_SIZE 1023
/* Each VF owns a single virtual queue */
#define OTX_CPT_NUM_QS_PER_VF 1

/* One contiguous chunk of a command (instruction) queue */
struct otx_cpt_cmd_chunk {
	u8 *head;		/* CPU virtual address of the chunk */
	dma_addr_t dma_addr;	/* DMA address of the chunk */
	u32 size; /* Chunk size, max OTX_CPT_INST_CHUNK_MAX_SIZE */
	struct list_head nextchunk; /* Links this chunk into the queue's list */
};
/* A command queue: a chunk list plus the current host insert position */
struct otx_cpt_cmd_queue {
	u32 idx; /* Command queue host write idx */
	u32 num_chunks; /* Number of command chunks */
	struct otx_cpt_cmd_chunk *qhead;/*
					 * Command queue head, instructions
					 * are inserted here
					 */
	struct otx_cpt_cmd_chunk *base;	/* First chunk of the queue */
	struct list_head chead;		/* List of all chunks of this queue */
};

struct otx_cpt_cmd_qinfo {
	u32 qchunksize; /* Command queue chunk size */
	struct otx_cpt_cmd_queue queue[OTX_CPT_NUM_QS_PER_VF];
};

struct otx_cpt_pending_qinfo {
	u32 num_queues; /* Number of queues supported */
	struct otx_cpt_pending_queue queue[OTX_CPT_NUM_QS_PER_VF];
};

/*
 * Iterate over all pending queues of @qinfo; @q is the cursor, @i the
 * running index.  Note @q ends up pointing one past the array when the
 * loop terminates (address is computed but never dereferenced there).
 */
#define for_each_pending_queue(qinfo, q, i)	\
	for (i = 0, q = &qinfo->queue[i]; i < qinfo->num_queues; i++, \
	     q = &qinfo->queue[i])
/* Per-queue bottom-half work: a tasklet plus a back-pointer to the VF */
struct otx_cptvf_wqe {
	struct tasklet_struct twork;
	struct otx_cptvf *cptvf;
};

/* One work entry per virtual queue of the VF */
struct otx_cptvf_wqe_info {
	struct otx_cptvf_wqe vq_wqe[OTX_CPT_NUM_QS_PER_VF];
};

/* Per-VF device state */
struct otx_cptvf {
	u16 flags; /* Flags to hold device status bits */
	u8 vfid; /* Device Index 0...OTX_CPT_MAX_VF_NUM */
	u8 num_vfs; /* Number of enabled VFs */
	u8 vftype; /* VF type of SE_TYPE(2) or AE_TYPE(1) */
	u8 vfgrp; /* VF group (0 - 8) */
	u8 node; /* Operating node: Bits (46:44) in BAR0 address */
	u8 priority; /*
		      * VF priority ring: 1-High priority round
		      * robin ring; 0-Low priority round robin ring;
		      */
	struct pci_dev *pdev; /* Pci device handle */
	void __iomem *reg_base; /* Register start address */
	void *wqe_info; /* BH worker info */
	/* MSI-X */
	cpumask_var_t affinity_mask[OTX_CPT_VF_MSIX_VECTORS];
	/* Command and Pending queues */
	u32 qsize; /* Command queue size, conveyed to the PF via QLEN msg */
	u32 num_queues;
	struct otx_cpt_cmd_qinfo cqinfo; /* Command queue information */
	struct otx_cpt_pending_qinfo pqinfo; /* Pending queue information */
	/* VF-PF mailbox communication */
	bool pf_acked;	/* Set by the mbox handler when the PF ACKs a msg */
	bool pf_nacked;	/* Set by the mbox handler when the PF NACKs a msg */
};
/*
 * VF->PF mailbox requests.  Each blocks until the PF ACKs/NACKs the
 * message or the mailbox timeout expires (see otx_cptvf_mbox.c).
 */
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf);
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf);
int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group);
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf);
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf);
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf);
/* Handle a mailbox message received from the PF */
void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf);
/* Write @val to the VF's VQ doorbell register */
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val);

#endif /* __OTX_CPTVF_H */
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPT_ALGS_H
#define __OTX_CPT_ALGS_H
#include <crypto/hash.h>
#include "otx_cpt_common.h"
/* Maximum cipher and HMAC key sizes handled by this driver, in bytes */
#define OTX_CPT_MAX_ENC_KEY_SIZE 32
#define OTX_CPT_MAX_HASH_KEY_SIZE 64
#define OTX_CPT_MAX_KEY_SIZE (OTX_CPT_MAX_ENC_KEY_SIZE + \
			      OTX_CPT_MAX_HASH_KEY_SIZE)

/* Request categories used by the algorithm glue code */
enum otx_cpt_request_type {
	OTX_CPT_ENC_DEC_REQ = 0x1,
	OTX_CPT_AEAD_ENC_DEC_REQ = 0x2,
	OTX_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
	OTX_CPT_PASSTHROUGH_REQ = 0x4
};

/* Major opcodes understood by the CPT microcode */
enum otx_cpt_major_opcodes {
	OTX_CPT_MAJOR_OP_MISC = 0x01,
	OTX_CPT_MAJOR_OP_FC = 0x33,
	OTX_CPT_MAJOR_OP_HMAC = 0x35,
};

/* Engine type a request is destined for (AE or SE core) */
enum otx_cpt_req_type {
	OTX_CPT_AE_CORE_REQ,
	OTX_CPT_SE_CORE_REQ
};

/* Cipher algorithm identifiers (4-bit hardware encoding) */
enum otx_cpt_cipher_type {
	OTX_CPT_CIPHER_NULL = 0x0,
	OTX_CPT_DES3_CBC = 0x1,
	OTX_CPT_DES3_ECB = 0x2,
	OTX_CPT_AES_CBC = 0x3,
	OTX_CPT_AES_ECB = 0x4,
	OTX_CPT_AES_CFB = 0x5,
	OTX_CPT_AES_CTR = 0x6,
	OTX_CPT_AES_GCM = 0x7,
	OTX_CPT_AES_XTS = 0x8
};

/* MAC/hash algorithm identifiers (4-bit hardware encoding) */
enum otx_cpt_mac_type {
	OTX_CPT_MAC_NULL = 0x0,
	OTX_CPT_MD5 = 0x1,
	OTX_CPT_SHA1 = 0x2,
	OTX_CPT_SHA224 = 0x3,
	OTX_CPT_SHA256 = 0x4,
	OTX_CPT_SHA384 = 0x5,
	OTX_CPT_SHA512 = 0x6,
	OTX_CPT_GMAC = 0x7
};

/* AES key length encoding (2-bit field; see encr_ctrl.e.aes_key) */
enum otx_cpt_aes_key_len {
	OTX_CPT_AES_128_BIT = 0x1,
	OTX_CPT_AES_192_BIT = 0x2,
	OTX_CPT_AES_256_BIT = 0x3
};
/*
 * Encryption control word of the fused-crypto (FC) context.
 * Bitfields are declared in both endiannesses so the layout matches the
 * hardware view on either host byte order.
 */
union otx_cpt_encr_ctrl {
	u64 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 enc_cipher:4;	/* enum otx_cpt_cipher_type */
		u64 reserved1:1;
		u64 aes_key:2;		/* enum otx_cpt_aes_key_len */
		u64 iv_source:1;
		u64 mac_type:4;		/* enum otx_cpt_mac_type */
		u64 reserved2:3;
		u64 auth_input_type:1;
		u64 mac_len:8;
		u64 reserved3:8;
		u64 encr_offset:16;
		u64 iv_offset:8;
		u64 auth_offset:8;
#else
		u64 auth_offset:8;
		u64 iv_offset:8;
		u64 encr_offset:16;
		u64 reserved3:8;
		u64 mac_len:8;
		u64 auth_input_type:1;
		u64 reserved2:3;
		u64 mac_type:4;		/* enum otx_cpt_mac_type */
		u64 iv_source:1;
		u64 aes_key:2;		/* enum otx_cpt_aes_key_len */
		u64 reserved1:1;
		u64 enc_cipher:4;	/* enum otx_cpt_cipher_type */
#endif
	} e;
};
/* Maps an algorithm name to its hardware cipher identifier */
struct otx_cpt_cipher {
	const char *name;
	u8 value;
};

/* Encryption half of the fused-crypto (FC) context */
struct otx_cpt_enc_context {
	union otx_cpt_encr_ctrl enc_ctrl;
	u8 encr_key[32];
	u8 encr_iv[16];
};

/* HMAC half of the FC context - two overlapping interpretations */
union otx_cpt_fchmac_ctx {
	struct {
		u8 ipad[64];
		u8 opad[64];
	} e;
	struct {
		u8 hmac_calc[64]; /* HMAC calculated */
		u8 hmac_recv[64]; /* HMAC received */
	} s;
};

/* Complete fused-crypto context (encryption + HMAC) */
struct otx_cpt_fc_ctx {
	struct otx_cpt_enc_context enc;
	union otx_cpt_fchmac_ctx hmac;
};

/* Per-tfm context for plain cipher algorithms */
struct otx_cpt_enc_ctx {
	u32 key_len;
	u8 enc_key[OTX_CPT_MAX_KEY_SIZE];
	u8 cipher_type;	/* enum otx_cpt_cipher_type */
	u8 key_type;
};

/* Per-tfm context for 3DES algorithms */
struct otx_cpt_des3_ctx {
	u32 key_len;
	u8 des3_key[OTX_CPT_MAX_KEY_SIZE];
};
/* Offsets of the auth, IV and encrypted regions within the input data */
union otx_cpt_offset_ctrl_word {
	u64 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 reserved:32;
		u64 enc_data_offset:16;
		u64 iv_offset:8;
		u64 auth_offset:8;
#else
		u64 auth_offset:8;
		u64 iv_offset:8;
		u64 enc_data_offset:16;
		u64 reserved:32;
#endif
	} e;
};

/* Per-request context kept in the crypto request's private area */
struct otx_cpt_req_ctx {
	struct otx_cpt_req_info cpt_req;
	union otx_cpt_offset_ctrl_word ctrl_word;
	struct otx_cpt_fc_ctx fctx;
};

/*
 * Wrapper around a shash descriptor.
 * NOTE(review): presumably used for HMAC ipad/opad derivation - confirm
 * against otx_cptvf_algs.c.
 */
struct otx_cpt_sdesc {
	struct shash_desc shash;
};

/* Per-tfm context for AEAD algorithms */
struct otx_cpt_aead_ctx {
	u8 key[OTX_CPT_MAX_KEY_SIZE];
	struct crypto_shash *hashalg;
	struct otx_cpt_sdesc *sdesc;
	u8 *ipad;
	u8 *opad;
	u32 enc_key_len;
	u32 auth_key_len;
	u8 cipher_type;	/* enum otx_cpt_cipher_type */
	u8 mac_type;	/* enum otx_cpt_mac_type */
	u8 key_type;
	u8 is_trunc_hmac;
};

/* Register / unregister this driver's crypto algorithms */
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices);
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type);
/* Common request-completion callback */
void otx_cpt_callback(int status, void *arg, void *req);

#endif /* __OTX_CPT_ALGS_H */
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include "otx_cptvf.h"
#define CPT_MBOX_MSG_TIMEOUT 2000
/*
 * Map a mailbox opcode to a human-readable name for debug logging.
 * Unrecognized opcodes map to "Unknown".
 */
static char *get_mbox_opcode_str(int msg_opcode)
{
	switch (msg_opcode) {
	case OTX_CPT_MSG_VF_UP:
		return "UP";
	case OTX_CPT_MSG_VF_DOWN:
		return "DOWN";
	case OTX_CPT_MSG_READY:
		return "READY";
	case OTX_CPT_MSG_QLEN:
		return "QLEN";
	case OTX_CPT_MSG_QBIND_GRP:
		return "QBIND_GRP";
	case OTX_CPT_MSG_VQ_PRIORITY:
		return "VQ_PRIORITY";
	case OTX_CPT_MSG_PF_TYPE:
		return "PF_TYPE";
	case OTX_CPT_MSG_ACK:
		return "ACK";
	case OTX_CPT_MSG_NACK:
		return "NACK";
	default:
		return "Unknown";
	}
}
/*
 * Dump a raw mailbox message at debug log level.
 * @vf_id: source VF number, or a negative value when the message came
 *         from the PF.
 */
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
	char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];

	/* Render the whole mbox struct as hex words for the log line */
	hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
			   raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
	if (vf_id >= 0)
		pr_debug("MBOX msg %s received from VF%d raw_data %s",
			 get_mbox_opcode_str(mbox_msg->msg), vf_id,
			 raw_data_str);
	else
		pr_debug("MBOX msg %s received from PF raw_data %s",
			 get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
/* Copy a mailbox message into the VF->PF mailbox registers */
static void cptvf_send_msg_to_pf(struct otx_cptvf *cptvf,
				 struct otx_cpt_mbox *mbx)
{
	/* Writing mbox(1) causes interrupt */
	writeq(mbx->msg, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
	writeq(mbx->data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
}
/* Interrupt handler to handle mailbox messages from the PF */
void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};

	/*
	 * MBOX[0] contains msg
	 * MBOX[1] contains data
	 */
	mbx.msg = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
	mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));

	dump_mbox_msg(&mbx, -1);

	switch (mbx.msg) {
	case OTX_CPT_MSG_VF_UP:
		/* PF replies to VF_UP with the number of enabled VFs */
		cptvf->pf_acked = true;
		cptvf->num_vfs = mbx.data;
		break;
	case OTX_CPT_MSG_READY:
		/* PF replies to READY with this VF's index */
		cptvf->pf_acked = true;
		cptvf->vfid = mbx.data;
		dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid);
		break;
	case OTX_CPT_MSG_QBIND_GRP:
		/* PF replies with the engine type the VQ was bound to */
		cptvf->pf_acked = true;
		cptvf->vftype = mbx.data;
		dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n",
			cptvf->vfid,
			((mbx.data == OTX_CPT_SE_TYPES) ? "SE" : "AE"),
			cptvf->vfgrp);
		break;
	case OTX_CPT_MSG_ACK:
		cptvf->pf_acked = true;
		break;
	case OTX_CPT_MSG_NACK:
		cptvf->pf_nacked = true;
		break;
	default:
		dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n",
			mbx.msg);
		break;
	}
}
/*
 * Send a mailbox message to the PF and poll for the ACK/NACK set by the
 * mailbox interrupt handler.
 *
 * Return: 0 on ACK, -EINVAL on NACK, -EBUSY when the PF does not answer
 * within CPT_MBOX_MSG_TIMEOUT milliseconds.
 */
static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
					struct otx_cpt_mbox *mbx)
{
	int remaining = CPT_MBOX_MSG_TIMEOUT;
	int poll_ms = 10;

	cptvf->pf_acked = false;
	cptvf->pf_nacked = false;
	cptvf_send_msg_to_pf(cptvf, mbx);

	/* Poll in poll_ms steps until ACK, NACK or timeout */
	while (!cptvf->pf_acked) {
		if (cptvf->pf_nacked)
			return -EINVAL;
		msleep(poll_ms);
		if (cptvf->pf_acked)
			break;
		remaining -= poll_ms;
		if (!remaining) {
			dev_err(&cptvf->pdev->dev,
				"PF didn't ack to mbox msg %llx from VF%u\n",
				mbx->msg, cptvf->vfid);
			return -EBUSY;
		}
	}

	return 0;
}
/*
 * Check whether the VF is able to communicate with the PF and obtain
 * the CPT number this VF is associated to (delivered via the READY
 * reply handled in otx_cptvf_handle_mbox_intr()).
 */
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};

	mbx.msg = OTX_CPT_MSG_READY;

	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
 * Communicate VQ size to the PF so it can program
 * CPT(0)_PF_Q(0-15)_CTL for this VF.  Must be ACKed.
 */
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};

	mbx.msg = OTX_CPT_MSG_QLEN;
	mbx.data = cptvf->qsize;

	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
 * Communicate the required VF group to the PF and get the VQ bound to
 * that group.  The group is remembered locally only on success.
 */
int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
{
	struct otx_cpt_mbox mbx = {};
	int err;

	mbx.msg = OTX_CPT_MSG_QBIND_GRP;
	mbx.data = group;

	err = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
	if (err)
		return err;

	cptvf->vfgrp = group;

	return 0;
}
/*
 * Communicate VF priority to the PF so it can configure this VF's
 * round-robin priority ring.  (Original comment wrongly said "group".)
 */
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};
	int ret;

	mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
	/* Convey priority of the VF */
	mbx.data = cptvf->priority;
	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);

	return ret;
}
/*
 * Notify the PF that this VF is UP and running.
 */
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};

	mbx.msg = OTX_CPT_MSG_VF_UP;

	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
 * Notify the PF that this VF is going DOWN.
 * (Original comment said "DOWN and running" - a copy-paste slip.)
 */
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
	struct otx_cpt_mbox mbx = {};
	int ret;

	mbx.msg = OTX_CPT_MSG_VF_DOWN;
	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);

	return ret;
}
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __OTX_CPTVF_REQUEST_MANAGER_H
#define __OTX_CPTVF_REQUEST_MANAGER_H
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/pci.h>
#include "otx_cpt_hw_types.h"
/*
 * Maximum total number of SG buffers is 100, we divide it equally
 * between input and output
 */
#define OTX_CPT_MAX_SG_IN_CNT 50
#define OTX_CPT_MAX_SG_OUT_CNT 50

/* DMA mode direct or SG */
#define OTX_CPT_DMA_DIRECT_DIRECT 0
#define OTX_CPT_DMA_GATHER_SCATTER 1

/* Context source CPTR or DPTR */
#define OTX_CPT_FROM_CPTR 0
#define OTX_CPT_FROM_DPTR 1

/* CPT instruction queue alignment */
#define OTX_CPT_INST_Q_ALIGNMENT 128
/* Maximum request data length in bytes (fits the 16-bit dlen field) */
#define OTX_CPT_MAX_REQ_SIZE 65535

/* Default command timeout in seconds */
#define OTX_CPT_COMMAND_TIMEOUT 4
#define OTX_CPT_TIMER_HOLD 0x03F
#define OTX_CPT_COUNT_HOLD 32
#define OTX_CPT_TIME_IN_RESET_COUNT 5

/* Minimum and maximum values for interrupt coalescing */
#define OTX_CPT_COALESC_MIN_TIME_WAIT 0x0
#define OTX_CPT_COALESC_MAX_TIME_WAIT ((1<<16)-1)
#define OTX_CPT_COALESC_MIN_NUM_WAIT 0x0
#define OTX_CPT_COALESC_MAX_NUM_WAIT ((1<<20)-1)
/* Microcode opcode: major selects the opcode group, minor the operation */
union otx_cpt_opcode_info {
	u16 flags;
	struct {
		u8 major;	/* enum otx_cpt_major_opcodes */
		u8 minor;
	} s;
};

/* Core-specific part of a request; mirrors otx_cpt_iq_cmd_word0 fields */
struct otx_cptvf_request {
	u32 param1;
	u32 param2;
	u16 dlen;	/* Total input data length */
	union otx_cpt_opcode_info opcode;
};

/* One input/output buffer: CPU pointer, DMA address and size */
struct otx_cpt_buf_ptr {
	u8 *vptr;
	dma_addr_t dma_addr;
	u16 size;
};

/* Caller-supplied control word for a request */
union otx_cpt_ctrl_info {
	u32 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved0:26;
		u32 grp:3;	/* Group bits */
		u32 dma_mode:2;	/* DMA mode */
		u32 se_req:1;	/* To SE core */
#else
		u32 se_req:1;	/* To SE core */
		u32 dma_mode:2;	/* DMA mode */
		u32 grp:3;	/* Group bits */
		u32 reserved0:26;
#endif
	} s;
};
/*
 * CPT_INST_S software command definitions
 * Words EI (0-3)
 */
union otx_cpt_iq_cmd_word0 {
	u64 u64;
	struct {
		u16 opcode;
		u16 param1;
		u16 param2;
		u16 dlen;	/* Input data length */
	} s;
};

/* Word 3: context pointer plus engine-group selector */
union otx_cpt_iq_cmd_word3 {
	u64 u64;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 grp:3;
		u64 cptr:61;
#else
		u64 cptr:61;
		u64 grp:3;
#endif
	} s;
};

/* Complete instruction-queue command: word0 + DPTR + RPTR + word3 */
struct otx_cpt_iq_cmd {
	union otx_cpt_iq_cmd_word0 cmd;
	u64 dptr;	/* Input (gather) data address */
	u64 rptr;	/* Result (scatter) data address */
	union otx_cpt_iq_cmd_word3 cptr;
};
/* Gather/scatter list component: four lengths plus four pointers */
struct otx_cpt_sglist_component {
	union {
		u64 len;
		struct {
			u16 len0;
			u16 len1;
			u16 len2;
			u16 len3;
		} s;
	} u;
	u64 ptr0;
	u64 ptr1;
	u64 ptr2;
	u64 ptr3;
};

/* Tracks one in-flight request until its completion is observed */
struct otx_cpt_pending_entry {
	u64 *completion_addr;	/* Completion address */
	struct otx_cpt_info_buffer *info;
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	u8 resume_sender; /* Notify sender to resume sending requests */
	u8 busy; /* Entry status (free/busy) */
};

/* Ring buffer of pending entries, one per virtual queue */
struct otx_cpt_pending_queue {
	struct otx_cpt_pending_entry *head;	/* Head of the queue */
	u32 front;		/* Process work from here */
	u32 rear;		/* Append new work here */
	u32 pending_count;	/* Pending requests count */
	u32 qlen;		/* Queue length */
	spinlock_t lock;	/* Queue lock */
};
/* Full request description passed down by the algorithm layer */
struct otx_cpt_req_info {
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	struct otx_cptvf_request req;/* Request information (core specific) */
	union otx_cpt_ctrl_info ctrl;/* User control information */
	struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];
	struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT];
	u8 *iv_out;	/* IV to send back */
	u16 rlen;	/* Output length */
	u8 incnt;	/* Number of input buffers */
	u8 outcnt;	/* Number of output buffers */
	u8 req_type;	/* Type of request */
	u8 is_enc;	/* Is a request an encryption request */
	u8 is_trunc_hmac;/* Is truncated hmac used */
};

/* Per-request DMA bookkeeping; released by do_request_cleanup() */
struct otx_cpt_info_buffer {
	struct otx_cpt_pending_entry *pentry;
	struct otx_cpt_req_info *req;
	struct pci_dev *pdev;
	u64 *completion_addr;	/* Where the device writes completion status */
	u8 *out_buffer;
	u8 *in_buffer;
	dma_addr_t dptr_baddr;	/* DMA address of the mapped input region */
	dma_addr_t rptr_baddr;
	dma_addr_t comp_baddr;
	unsigned long time_in;	/* Submission time, for timeout tracking */
	u32 dlen;
	u32 dma_len;	/* Length of the dptr_baddr mapping */
	u8 extra_time;
};
/*
 * Unmap all DMA buffers attached to a request and free the per-request
 * info structure.
 *
 * Safe to call on a partially-initialized @info: every unmap is guarded
 * by a non-zero DMA address check, and the buffer loops run only when
 * @info->req is set.
 */
static inline void do_request_cleanup(struct pci_dev *pdev,
				      struct otx_cpt_info_buffer *info)
{
	struct otx_cpt_req_info *req;
	int i;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dma_len, DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		/* Unmap every output buffer that was mapped */
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}
		/* Unmap every input buffer that was mapped */
		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}
	/* kzfree() zeroes before freeing - info may reference key material */
	kzfree(info);
}
struct otx_cptvf_wqe;

/* Debug dump of a request's gather/scatter buffer lists */
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
/* Bottom-half processing of completed instructions for one VQ */
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
/* Submit @req to the instruction queue of the VF owning @pdev */
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
		       int cpu_num);

#endif /* __OTX_CPTVF_REQUEST_MANAGER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment