Commit c30362cc authored by Tomas Winkler, committed by Greg Kroah-Hartman

mei: dma ring: implement transmit flow

Implement a circular buffer in allocated system memory. The read and
write indices are stored in the control block, which is shared between
the device and the host.
Two new functions are exported from the DMA module: mei_dma_ring_write
and mei_dma_ring_empty_slots. The former copies a packet into the TX
DMA circular buffer; the latter returns the number of empty slots in
the TX DMA circular buffer.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6316321f
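To make the index scheme easier to follow, below is a minimal, standalone sketch (an editor's illustration, not driver code) of the power-of-two, 4-byte-slot ring arithmetic the TX flow in this patch relies on. All names here (ring_write, ring_empty_slots, RING_SLOTS, SLOT_SIZE) are made up for the example; the real driver keeps hbuf_rd_idx/hbuf_wr_idx in the control block shared with the device and computes the empty-slot count with an explicit wrap check.

/* ring_sketch.c - illustrative only; not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SLOTS 64u                 /* ring depth in slots; power of two */
#define SLOT_SIZE  4u                  /* one slot carries one 32-bit dword */

struct ring {
	uint8_t  buf[RING_SLOTS * SLOT_SIZE];
	uint32_t rd_idx;               /* free-running read index (slots)  */
	uint32_t wr_idx;               /* free-running write index (slots) */
};

/* round a byte length up to whole slots */
static uint32_t data2slots(uint32_t len)
{
	return (len + SLOT_SIZE - 1) / SLOT_SIZE;
}

/* slots the producer may still claim before catching up with the consumer */
static uint32_t ring_empty_slots(const struct ring *r)
{
	return RING_SLOTS - (r->wr_idx - r->rd_idx);
}

/* copy one packet into the ring, wrapping at the end of the buffer */
static int ring_write(struct ring *r, const void *data, uint32_t len)
{
	uint32_t slots = data2slots(len);
	uint32_t off   = (r->wr_idx & (RING_SLOTS - 1)) * SLOT_SIZE;
	uint32_t tail  = sizeof(r->buf) - off;   /* bytes until the wrap point */

	if (slots > ring_empty_slots(r))
		return -1;                       /* caller must wait or split  */

	if (len > tail) {
		memcpy(r->buf + off, data, tail);
		memcpy(r->buf, (const uint8_t *)data + tail, len - tail);
	} else {
		memcpy(r->buf + off, data, len);
	}

	r->wr_idx += slots;                      /* publish whole slots */
	return 0;
}

int main(void)
{
	struct ring r = { .rd_idx = 0, .wr_idx = 0 };
	const char msg[16] = "hello dma ring";

	printf("empty before: %u slots\n", (unsigned)ring_empty_slots(&r));
	ring_write(&r, msg, sizeof(msg));
	printf("empty after:  %u slots\n", (unsigned)ring_empty_slots(&r));
	return 0;
}

The sketch compiles standalone, e.g. with gcc -Wall -o ring_sketch ring_sketch.c.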
@@ -1558,10 +1558,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	struct mei_msg_hdr mei_hdr;
 	size_t hdr_len = sizeof(mei_hdr);
 	size_t len;
-	size_t hbuf_len;
+	size_t hbuf_len, dr_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	int rets;
 	bool first_chunk;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1582,6 +1585,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	}
 
 	len = buf->size - cb->buf_idx;
+	data = buf->data + cb->buf_idx;
 	hbuf_slots = mei_hbuf_empty_slots(dev);
 	if (hbuf_slots < 0) {
 		rets = -EOVERFLOW;
@@ -1589,6 +1593,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	}
 
 	hbuf_len = mei_slots2data(hbuf_slots);
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
 	mei_msg_hdr_init(&mei_hdr, cb);
 
@@ -1599,23 +1605,33 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	if (len + hdr_len <= hbuf_len) {
 		mei_hdr.length = len;
 		mei_hdr.msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr.dma_ring = 1;
+		if (len > dr_len)
+			len = dr_len;
+		else
+			mei_hdr.msg_complete = 1;
+
+		mei_hdr.length = sizeof(dma_len);
+		dma_len = len;
+		data = &dma_len;
 	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
-		mei_hdr.length = hbuf_len - hdr_len;
+		len = hbuf_len - hdr_len;
+		mei_hdr.length = len;
 	} else {
 		return 0;
 	}
 
-	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
-	       cb->buf.size, cb->buf_idx);
+	if (mei_hdr.dma_ring)
+		mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
 
-	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data + cb->buf_idx, mei_hdr.length);
+	rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
 	if (rets)
 		goto err;
 
 	cl->status = 0;
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx += mei_hdr.length;
+	cb->buf_idx += len;
 
 	if (first_chunk) {
 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1650,11 +1666,13 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 	struct mei_msg_data *buf;
 	struct mei_msg_hdr mei_hdr;
 	size_t hdr_len = sizeof(mei_hdr);
-	size_t len;
-	size_t hbuf_len;
+	size_t len, hbuf_len, dr_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	ssize_t rets;
 	bool blocking;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1666,10 +1684,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 	buf = &cb->buf;
 	len = buf->size;
-	blocking = cb->blocking;
 
 	cl_dbg(dev, cl, "len=%zd\n", len);
 
+	blocking = cb->blocking;
+	data = buf->data;
+
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
@@ -1706,16 +1726,32 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 	}
 
 	hbuf_len = mei_slots2data(hbuf_slots);
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
 	if (len + hdr_len <= hbuf_len) {
 		mei_hdr.length = len;
 		mei_hdr.msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr.dma_ring = 1;
+		if (len > dr_len)
+			len = dr_len;
+		else
+			mei_hdr.msg_complete = 1;
+
+		mei_hdr.length = sizeof(dma_len);
+		dma_len = len;
+		data = &dma_len;
 	} else {
-		mei_hdr.length = hbuf_len - hdr_len;
+		len = hbuf_len - hdr_len;
+		mei_hdr.length = len;
 	}
 
+	if (mei_hdr.dma_ring)
+		mei_dma_ring_write(dev, buf->data, len);
+
 	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data, mei_hdr.length);
+				 data, mei_hdr.length);
 	if (rets)
 		goto err;
 
@@ -1724,7 +1760,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 		goto err;
 
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx = mei_hdr.length;
+	cb->buf_idx = len;
+	/* restore return value */
+	len = buf->size;
 
 out:
 	if (mei_hdr.msg_complete)
@@ -138,6 +138,26 @@ static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
 	return b_n;
 }
 
+/**
+ * mei_dma_copy_to() - copy from a buffer to the dma ring
+ * @dev: mei device
+ * @buf: data buffer
+ * @offset: offset in slots.
+ * @n: number of slots to copy.
+ */
+static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
+			      u32 offset, u32 n)
+{
+	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
+
+	size_t b_offset = offset << 2;
+	size_t b_n = n << 2;
+
+	memcpy(hbuf + b_offset, buf, b_n);
+
+	return b_n;
+}
+
 /**
  * mei_dma_ring_read() - read data from the ring
  * @dev: mei device
@@ -178,3 +198,72 @@ void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
 out:
 	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
 }
+
+static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
+{
+	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
+}
+
+/**
+ * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
+ * @dev: mei_device
+ *
+ * Return: number of empty slots
+ */
+u32 mei_dma_ring_empty_slots(struct mei_device *dev)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 wr_idx, rd_idx, hbuf_depth, empty;
+
+	if (!mei_dma_ring_is_allocated(dev))
+		return 0;
+
+	if (WARN_ON(!ctrl))
+		return 0;
+
+	/* easier to work in slots */
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+
+	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
+
+	if (rd_idx > wr_idx)
+		empty = rd_idx - wr_idx;
+	else
+		empty = hbuf_depth - (wr_idx - rd_idx);
+
+	return empty;
+}
+
+/**
+ * mei_dma_ring_write - write data to dma ring host buffer
+ *
+ * @dev: mei_device
+ * @buf: data will be written
+ * @len: data length
+ */
+void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 hbuf_depth;
+	u32 wr_idx, rem, slots;
+
+	if (WARN_ON(!ctrl))
+		return;
+
+	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
+	slots = mei_data2slots(len);
+
+	if (wr_idx + slots > hbuf_depth) {
+		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
+		rem = slots - (hbuf_depth - wr_idx);
+		wr_idx = 0;
+	} else {
+		rem = slots;
+	}
+
+	mei_dma_copy_to(dev, buf, wr_idx, rem);
+	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
+}
@@ -599,6 +599,8 @@ void mei_dmam_ring_free(struct mei_device *dev);
 bool mei_dma_ring_is_allocated(struct mei_device *dev);
 void mei_dma_ring_reset(struct mei_device *dev);
 void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len);
+void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len);
+u32 mei_dma_ring_empty_slots(struct mei_device *dev);
 
 /*
  * MEI interrupt functions prototype