Commit 968a64ea authored by Kyoungil Kim, committed by Chris Ball

mmc: sdio: Use multiple scatter/gather list

Before this patch, only a single sg entry was used for each SDIO transfer.
This patch switches to using multiple sg entries. The dwmci controller,
for example, supports at most 4KB per sg entry, so a transfer larger than
4KB previously had to be split across more than one command.

In testing with the dwmci controller, a 5K (5120-byte) transfer took around
335 us before this patch and 242 us after it, roughly a 38% performance
improvement for the 5K transfer. The larger the transfer, the larger the
improvement.
Signed-off-by: Kyoungil Kim <ki0351.kim@samsung.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 8fee476b
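As an illustration of the change, here is a minimal user-space sketch (not part of the patch) of the split arithmetic: assuming a host with a hypothetical 4096-byte max_seg_size such as dwmci, the 5K transfer measured above fits in a single request with two sg entries (4096 + 1024 bytes) instead of needing a second command.

#include <stdio.h>

int main(void)
{
	unsigned int seg_size  = 4096;	/* assumed host->max_seg_size (dwmci-like) */
	unsigned int left_size = 5120;	/* the 5K transfer from the commit message */
	/* Same round-up formula the patch uses for the number of sg entries */
	unsigned int nents = (left_size - 1) / seg_size + 1;
	unsigned int i;

	printf("sg entries needed: %u\n", nents);	/* prints 2 */
	for (i = 0; i < nents; i++) {
		unsigned int len = left_size < seg_size ? left_size : seg_size;
		printf("entry %u: %u bytes\n", i, len);	/* 4096, then 1024 */
		left_size -= len;
	}
	return 0;
}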
drivers/mmc/core/sdio_io.c
@@ -188,8 +188,7 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size);
  */
 static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
 {
-	unsigned mval = min(func->card->host->max_seg_size,
-			    func->card->host->max_blk_size);
+	unsigned mval = func->card->host->max_blk_size;
 
 	if (mmc_blksz_for_byte_mode(func->card))
 		mval = min(mval, func->cur_blksize);
@@ -311,11 +310,8 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
 	/* Do the bulk of the transfer using block mode (if supported). */
 	if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
 		/* Blocks per command is limited by host count, host transfer
-		 * size (we only use a single sg entry) and the maximum for
-		 * IO_RW_EXTENDED of 511 blocks. */
-		max_blocks = min(func->card->host->max_blk_count,
-			func->card->host->max_seg_size / func->cur_blksize);
-		max_blocks = min(max_blocks, 511u);
+		 * size and the maximum for IO_RW_EXTENDED of 511 blocks. */
+		max_blocks = min(func->card->host->max_blk_count, 511u);
 
 		while (remainder >= func->cur_blksize) {
 			unsigned blocks;
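To see why dropping the single-sg-entry cap raises the per-command limit, here is a hedged, standalone sketch with assumed example values (the host limits and 512-byte block size are illustrative, not taken from the patch): the old formula caps a CMD53 at 8 blocks (4KB), so the 5K transfer needed two commands, while the new formula allows up to 511 blocks per command, so the same transfer fits in one.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Assumed example values, not from the patch */
	unsigned int max_blk_count = 65535;	/* hypothetical host->max_blk_count */
	unsigned int max_seg_size  = 4096;	/* dwmci-style 4KB per sg entry */
	unsigned int cur_blksize   = 512;	/* assumed SDIO function block size */
	unsigned int transfer      = 5120;	/* the 5K transfer from the commit message */

	/* Old limit: also bounded by what fits in a single sg entry */
	unsigned int old_max = MIN(MIN(max_blk_count, max_seg_size / cur_blksize), 511u);
	/* New limit: only the host block count and the IO_RW_EXTENDED cap of 511 */
	unsigned int new_max = MIN(max_blk_count, 511u);

	unsigned int blocks = transfer / cur_blksize;	/* 10 blocks */

	printf("blocks per CMD53: old %u, new %u\n", old_max, new_max);	/* 8 vs 511 */
	printf("commands for 5K:  old %u, new %u\n",
	       (blocks + old_max - 1) / old_max,	/* 2 */
	       (blocks + new_max - 1) / new_max);	/* 1 */
	return 0;
}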
drivers/mmc/core/sdio_ops.c
@@ -124,7 +124,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	struct mmc_request mrq = {NULL};
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
-	struct scatterlist sg;
+	struct scatterlist sg, *sg_ptr;
+	struct sg_table sgtable;
+	unsigned int nents, left_size, i;
+	unsigned int seg_size = card->host->max_seg_size;
 
 	BUG_ON(!card);
 	BUG_ON(fn > 7);
@@ -152,15 +155,36 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	/* Code in host drivers/fwk assumes that "blocks" always is >=1 */
 	data.blocks = blocks ? blocks : 1;
 	data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
 
-	data.sg = &sg;
-	data.sg_len = 1;
-	sg_init_one(&sg, buf, data.blksz * data.blocks);
+	left_size = data.blksz * data.blocks;
+	nents = (left_size - 1) / seg_size + 1;
+	if (nents > 1) {
+		if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
+			return -ENOMEM;
+
+		data.sg = sgtable.sgl;
+		data.sg_len = nents;
+
+		for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
+			sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
+				    min(seg_size, left_size),
+				    offset_in_page(buf + (i * seg_size)));
+			left_size = left_size - seg_size;
+		}
+	} else {
+		data.sg = &sg;
+		data.sg_len = 1;
+
+		sg_init_one(&sg, buf, left_size);
+	}
 
 	mmc_set_data_timeout(&data, card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
+	if (nents > 1)
+		sg_free_table(&sgtable);
+
 	if (cmd.error)
 		return cmd.error;
 	if (data.error)