Commit f45c40a9 authored by Sam Bradshaw, committed by Jens Axboe

mtip32xx: minor performance enhancements

This patch adds the following:

1) Compiler hinting in the fast path.
2) A prefetch of port->flags to eliminate moderate CPU stalling later
   in mtip_hw_submit_io().
3) Elimination of a redundant rq_data_dir() call.
4) Reordering of driver_data members to eliminate false cacheline
   sharing between irq_workers_active and unal_qdepth.

With some workload and topology configurations, I'm seeing a ~1.5%
throughput improvement in small-block random read benchmarks, as well
as a reduced latency standard deviation.
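For context, items 1) and 2) rely on two standard kernel idioms: likely()/unlikely() from <linux/compiler.h>, which expand to __builtin_expect() and bias code layout toward the expected branch, and prefetch() from <linux/prefetch.h>, which expands to __builtin_prefetch() and starts pulling a cacheline in ahead of its use. Below is a minimal userspace sketch of both idioms; the struct, flag, and helper names are hypothetical stand-ins, not the driver's own code.

    #include <stdio.h>

    /* Userspace renderings of the kernel macros used in this patch:
     * likely()/unlikely() expand to __builtin_expect(), and
     * prefetch() expands to __builtin_prefetch(). */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)
    #define prefetch(p) __builtin_prefetch(p)

    #define PF_PAUSE_IO (1UL << 0)  /* hypothetical "pause I/O" flag */

    struct port {                   /* hypothetical stand-in for the driver's port */
            unsigned long flags;    /* tested late in the submit path */
    };

    static void map_for_dma(void *cmd) { (void)cmd; } /* stands in for dma_map_sg() */
    static void issue_to_hw(void *cmd) { (void)cmd; } /* stands in for the HW issue */

    static int submit(struct port *port, void *cmd)
    {
            /* Start loading port->flags now; the mapping work below
             * gives the load time to complete before the flag test. */
            prefetch(&port->flags);

            map_for_dma(cmd);

            /* Pausing is rare: lay out the common path as fall-through. */
            if (unlikely(port->flags & PF_PAUSE_IO))
                    return -1;

            issue_to_hw(cmd);
            return 0;
    }

    int main(void)
    {
            struct port p = { .flags = 0 };
            printf("submit: %d\n", submit(&p, NULL));
            return 0;
    }

The key point of the prefetch placement is distance: it is issued before the DMA mapping precisely so the memory latency overlaps useful work rather than stalling at the flag test.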
Signed-off-by: Sam Bradshaw <sbradshaw@micron.com>

Add include of <linux/prefetch.h>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent f6be4fb4
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -39,6 +39,7 @@
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/prefetch.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	/* Map the scatter list for DMA access */
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
+	prefetch(&port->flags);
+
 	command->scatter_ents = nents;
 
 	/*
@@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis = command->command;
 	fis->type = 0x27;
 	fis->opts = 1 << 7;
-	if (rq_data_dir(rq) == READ)
+	if (dma_dir == DMA_FROM_DEVICE)
 		fis->command = ATA_CMD_FPDMA_READ;
 	else
 		fis->command = ATA_CMD_FPDMA_WRITE;
@@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis->res3 = 0;
 	fill_command_sg(dd, command, nents);
 
-	if (command->unaligned)
+	if (unlikely(command->unaligned))
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
@@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	 * To prevent this command from being issued
 	 * if an internal command is in progress or error handling is active.
 	 */
-	if (port->flags & MTIP_PF_PAUSE_IO) {
+	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
 		set_bit(rq->tag, port->cmds_to_issue);
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		return;
@@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	struct driver_data *dd = hctx->queue->queuedata;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
 		return false;
 
 	/*
@@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	int ret;
 
-	if (mtip_check_unal_depth(hctx, rq))
+	if (unlikely(mtip_check_unal_depth(hctx, rq)))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
 	ret = mtip_submit_request(hctx, rq);
-	if (!ret)
+	if (likely(!ret))
 		return BLK_MQ_RQ_QUEUE_OK;
 
 	rq->errors = ret;
...
...@@ -493,19 +493,19 @@ struct driver_data { ...@@ -493,19 +493,19 @@ struct driver_data {
struct workqueue_struct *isr_workq; struct workqueue_struct *isr_workq;
struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
atomic_t irq_workers_active; atomic_t irq_workers_active;
struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
int isr_binding; int isr_binding;
struct block_device *bdev; struct block_device *bdev;
int unal_qdepth; /* qdepth of unaligned IO queue */
struct list_head online_list; /* linkage for online list */ struct list_head online_list; /* linkage for online list */
struct list_head remove_list; /* linkage for removing list */ struct list_head remove_list; /* linkage for removing list */
int unal_qdepth; /* qdepth of unaligned IO queue */
}; };
#endif #endif
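The mtip32xx.h hunk is item 4): irq_workers_active is bumped atomically by the interrupt workers, and before the reorder only an int and a pointer separated it from unal_qdepth, which the submit path reads for every write, so the two very likely shared a cacheline; each atomic update then invalidated the submitter's cached copy. Moving the large work[] array between them pushes the fields onto different cachelines without adding any padding. Here is a minimal userspace sketch of the same idea; struct and field names are hypothetical, and a 64-byte cacheline is assumed.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CACHELINE 64 /* assumption: 64-byte cachelines, typical on x86 */

    /* Before: the hot counter and the read-mostly field share a cacheline,
     * so every atomic bump on one CPU evicts the line from any CPU reading
     * read_mostly (false sharing). */
    struct layout_bad {
            atomic_int hot_counter; /* written constantly by IRQ workers */
            int read_mostly;        /* read in the submit fast path */
    };

    /* After: an existing large member is placed between them (as the patch
     * does with work[]), separating the fields by more than a cacheline
     * at no extra space cost. */
    struct layout_good {
            atomic_int hot_counter;
            char big_member[4 * CACHELINE]; /* stands in for work[] */
            int read_mostly;
    };

    int main(void)
    {
            printf("bad gap:  %zu bytes\n",
                   offsetof(struct layout_bad, read_mostly));
            printf("good gap: %zu bytes\n",
                   offsetof(struct layout_good, read_mostly));
            return 0;
    }

Reordering existing members, rather than inserting explicit padding, is the cheaper fix when a suitably large member is already available.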