Commit 8aa34172 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "The bulk of the changes are in qla2xxx target driver code to address
  various issues found during Cavium/QLogic's internal testing (stable
  CC's included), along with a few other stability and smaller
  miscellaneous improvements.

   There are also a couple of different patch sets from Mike Christie,
   which are the result of his work to use the target-core ALUA logic
   together with the tcm-user backend driver.

   Finally, a patch to address some long-standing issues with
   pass-through SCSI export of TYPE_TAPE + TYPE_MEDIUM_CHANGER devices,
   which will make folks using physical (or virtual) magnetic tape happy"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (28 commits)
  qla2xxx: Update driver version to 9.00.00.00-k
  qla2xxx: Fix delayed response to command for loop mode/direct connect.
  qla2xxx: Change scsi host lookup method.
  qla2xxx: Add DebugFS node to display Port Database
  qla2xxx: Use IOCB interface to submit non-critical MBX.
  qla2xxx: Add async new target notification
  qla2xxx: Export DIF stats via debugfs
  qla2xxx: Improve T10-DIF/PI handling in driver.
  qla2xxx: Allow relogin to proceed if remote login did not finish
  qla2xxx: Fix sess_lock & hardware_lock lock order problem.
  qla2xxx: Fix inadequate lock protection for ABTS.
  qla2xxx: Fix request queue corruption.
  qla2xxx: Fix memory leak for abts processing
  qla2xxx: Allow vref count to timeout on vport delete.
  tcmu: Convert cmd_time_out into backend device attribute
  tcmu: make cmd timeout configurable
  tcmu: add helper to check if dev was configured
  target: fix race during implicit transition work flushes
  target: allow userspace to set state to transitioning
  target: fix ALUA transition timeout handling
  ...
parents 1b8df619 6c611d18
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
 depends on PCI && SCSI
 depends on SCSI_FC_ATTRS
 select FW_LOADER
+select BTREE
 ---help---
 This qla2xxx driver supports all QLogic Fibre Channel
 PCI and PCIe host adapters.
...
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
 }
-BUG_ON(atomic_read(&vha->vref_count));
 qla2x00_free_fcports(vha);
 mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 vha->gnl.ldma);
-if (vha->qpair->vp_idx == vha->vp_idx) {
+if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
 ql_log(ql_log_warn, vha, 0x7087,
 "Queue Pair delete failed.\n");
...
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt 0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
 uint32_t, void **);
...
@@ -25,6 +25,7 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
+#include <linux/btree.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
 struct completion comp;
 } abt;
 struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
 struct {
-__le16 in_mb[28]; /* fr fw */
-__le16 out_mb[28]; /* to fw */
+__le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+__le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
 void *out, *in;
 dma_addr_t out_dma, in_dma;
+struct completion comp;
+int rc;
 } mbx;
 struct {
 struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
 uint32_t handle;
 uint16_t flags;
 uint16_t type;
-char *name;
+const char *name;
 int iocbs;
 struct qla_qpair *qpair;
 u32 gen1; /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
 struct ct_sns_desc ct_desc;
 enum discovery_state disc_state;
 enum login_state fw_login_state;
+unsigned long plogi_nack_done_deadline;
 u32 login_gen, last_login_gen;
 u32 rscn_gen, last_rscn_gen;
 u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
 uint32_t gold_fw_version;
 };
+struct qla_dif_statistics {
+uint64_t dif_input_bytes;
+uint64_t dif_output_bytes;
+uint64_t dif_input_requests;
+uint64_t dif_output_requests;
+uint32_t dif_guard_err;
+uint32_t dif_ref_tag_err;
+uint32_t dif_app_tag_err;
+};
 struct qla_statistics {
 uint32_t total_isp_aborts;
 uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
 uint32_t stat_max_pend_cmds;
 uint32_t stat_max_qfull_cmds_alloc;
 uint32_t stat_max_qfull_cmds_dropped;
+struct qla_dif_statistics qla_dif_stats;
 };
 struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
 unsigned long long transfer_bytes;
 };
+struct qla_tc_param {
+struct scsi_qla_host *vha;
+uint32_t blk_sz;
+uint32_t bufflen;
+struct scatterlist *sg;
+struct scatterlist *prot_sg;
+struct crc_context *ctx;
+uint8_t *ctx_dsd_alloced;
+};
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
 uint8_t tgt_node_name[WWN_SIZE];
 struct dentry *dfs_tgt_sess;
+struct dentry *dfs_tgt_port_database;
 struct list_head q_full_list;
 uint32_t num_pend_cmds;
 uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
 spinlock_t sess_lock;
 int rspq_vector_cpuid;
 spinlock_t atio_lock ____cacheline_aligned;
+struct btree_head32 host_map;
 };
 #define MAX_QFULL_CMDS_ALLOC 8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+#define QLA_EARLY_LINKUP(_ha) \
+((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+_ha->flags.fw_started && !_ha->flags.fw_init_done)
 /*
  * Qlogic host adapter specific data structure.
  */
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
 uint32_t fawwpn_enabled:1;
 uint32_t exlogins_enabled:1;
 uint32_t exchoffld_enabled:1;
+/* 35 bits */
+uint32_t lip_ae:1;
+uint32_t n2n_ae:1;
+uint32_t fw_started:1;
+uint32_t fw_init_done:1;
 } flags;
 /* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
 #define P2P_LOOP 3
 uint8_t interrupts_on;
 uint32_t isp_abort_cnt;
 #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
 struct list_head vp_fcports; /* list of fcports */
 struct list_head work_list;
 spinlock_t work_lock;
+struct work_struct iocb_work;
 /* Commonly used flags and state information. */
 struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
 /* Count of active session/fcport */
 int fcport_count;
 wait_queue_head_t fcport_waitQ;
+wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;
 struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
 mb(); \
 if (__vha->flags.delete_progress) { \
 atomic_dec(&__vha->vref_count); \
+wake_up(&__vha->vref_waitq); \
 __bail = 1; \
 } else { \
 __bail = 0; \
 } \
 } while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
 atomic_dec(&__vha->vref_count); \
+wake_up(&__vha->vref_waitq); \
+} while (0) \
 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
 atomic_inc(&__qpair->ref_count); \
...
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
 struct qla_hw_data *ha = vha->hw;
 unsigned long flags;
 struct fc_port *sess = NULL;
-struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
-seq_printf(s, "%s\n",vha->host_str);
+seq_printf(s, "%s\n", vha->host_str);
 if (tgt) {
-seq_printf(s, "Port ID Port Name Handle\n");
+seq_puts(s, "Port ID Port Name Handle\n");
 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
 return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 static const struct file_operations dfs_tgt_sess_ops = {
 .open = qla2x00_dfs_tgt_sess_open,
 .read = seq_read,
@@ -52,6 +51,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
 .release = single_release,
 };
+static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+scsi_qla_host_t *vha = s->private;
+struct qla_hw_data *ha = vha->hw;
+struct gid_list_info *gid_list;
+dma_addr_t gid_list_dma;
+fc_port_t fc_port;
+char *id_iter;
+int rc, i;
+uint16_t entries, loop_id;
+struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+seq_printf(s, "%s\n", vha->host_str);
+if (tgt) {
+gid_list = dma_alloc_coherent(&ha->pdev->dev,
+qla2x00_gid_list_size(ha),
+&gid_list_dma, GFP_KERNEL);
+if (!gid_list) {
+ql_dbg(ql_dbg_user, vha, 0x705c,
+"DMA allocation failed for %u\n",
+qla2x00_gid_list_size(ha));
+return 0;
+}
+rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+&entries);
+if (rc != QLA_SUCCESS)
+goto out_free_id_list;
+id_iter = (char *)gid_list;
+seq_puts(s, "Port Name Port ID Loop ID\n");
+for (i = 0; i < entries; i++) {
+struct gid_list_info *gid =
+(struct gid_list_info *)id_iter;
+loop_id = le16_to_cpu(gid->loop_id);
+memset(&fc_port, 0, sizeof(fc_port_t));
+fc_port.loop_id = loop_id;
+rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+seq_printf(s, "%8phC %02x%02x%02x %d\n",
+fc_port.port_name, fc_port.d_id.b.domain,
+fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+fc_port.loop_id);
+id_iter += ha->gid_list_info_size;
+}
+out_free_id_list:
+dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+gid_list, gid_list_dma);
+}
+return 0;
+}
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+scsi_qla_host_t *vha = inode->i_private;
+return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+static const struct file_operations dfs_tgt_port_database_ops = {
+.open = qla2x00_dfs_tgt_port_database_open,
+.read = seq_read,
+.llseek = seq_lseek,
+.release = single_release,
+};
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
 seq_printf(s, "num Q full sent = %lld\n",
 vha->tgt_counters.num_q_full_sent);
+/* DIF stats */
+seq_printf(s, "DIF Inp Bytes = %lld\n",
+vha->qla_stats.qla_dif_stats.dif_input_bytes);
+seq_printf(s, "DIF Outp Bytes = %lld\n",
+vha->qla_stats.qla_dif_stats.dif_output_bytes);
+seq_printf(s, "DIF Inp Req = %lld\n",
+vha->qla_stats.qla_dif_stats.dif_input_requests);
+seq_printf(s, "DIF Outp Req = %lld\n",
+vha->qla_stats.qla_dif_stats.dif_output_requests);
+seq_printf(s, "DIF Guard err = %d\n",
+vha->qla_stats.qla_dif_stats.dif_guard_err);
+seq_printf(s, "DIF Ref tag err = %d\n",
+vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+seq_printf(s, "DIF App tag err = %d\n",
+vha->qla_stats.qla_dif_stats.dif_app_tag_err);
 return 0;
 }
@@ -281,6 +367,14 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
 goto out;
 }
+ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+if (!ha->tgt.dfs_tgt_port_database) {
+ql_log(ql_log_warn, vha, 0xffff,
+"Unable to create debugFS tgt_port_database node.\n");
+goto out;
+}
 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
 &dfs_fce_ops);
 if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
 ha->tgt.dfs_tgt_sess = NULL;
 }
+if (ha->tgt.dfs_tgt_port_database) {
+debugfs_remove(ha->tgt.dfs_tgt_port_database);
+ha->tgt.dfs_tgt_port_database = NULL;
+}
 if (ha->dfs_fw_resource_cnt) {
 debugfs_remove(ha->dfs_fw_resource_cnt);
 ha->dfs_fw_resource_cnt = NULL;
...
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
 /*
  * Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-uint32_t *, uint16_t, struct qla_tgt_cmd *);
+uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-uint32_t *, uint16_t, struct qla_tgt_cmd *);
+uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-uint32_t *, uint16_t, struct qla_tgt_cmd *);
+uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
 extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
-dma_addr_t, uint);
+dma_addr_t, uint16_t);
 extern int qla24xx_abort_command(srb_t *);
 extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+struct port_database_24xx *);
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
 uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
 #endif /* _QLA_GBL_H */
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 struct srb *sp = s;
 struct scsi_qla_host *vha = sp->vha;
 struct qla_hw_data *ha = vha->hw;
-uint64_t zero = 0;
 struct port_database_24xx *pd;
 fc_port_t *fcport = sp->fcport;
 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-/* Check for logged in state. */
-if (pd->current_login_state != PDS_PRLI_COMPLETE &&
-pd->last_login_state != PDS_PRLI_COMPLETE) {
-ql_dbg(ql_dbg_mbx, vha, 0xffff,
-"Unable to verify login-state (%x/%x) for "
-"loop_id %x.\n", pd->current_login_state,
-pd->last_login_state, fcport->loop_id);
-rval = QLA_FUNCTION_FAILED;
-goto gpd_error_out;
-}
-if (fcport->loop_id == FC_NO_LOOP_ID ||
-(memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
-memcmp(fcport->port_name, pd->port_name, 8))) {
-/* We lost the device mid way. */
-rval = QLA_NOT_LOGGED_IN;
-goto gpd_error_out;
-}
-/* Names are little-endian. */
-memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-/* Get port_id of device. */
-fcport->d_id.b.domain = pd->port_id[0];
-fcport->d_id.b.area = pd->port_id[1];
-fcport->d_id.b.al_pa = pd->port_id[2];
-fcport->d_id.b.rsvd_1 = 0;
-/* If not target must be initiator or unknown type. */
-if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
-fcport->port_type = FCT_INITIATOR;
-else
-fcport->port_type = FCT_TARGET;
-/* Passback COS information. */
-fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
-FC_COS_CLASS2 : FC_COS_CLASS3;
-if (pd->prli_svc_param_word_3[0] & BIT_7) {
-fcport->flags |= FCF_CONF_COMP_SUPPORTED;
-fcport->conf_compl_supported = 1;
-}
+rval = __qla24xx_parse_gpdb(vha, fcport, pd);
 gpd_error_out:
 memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 fcport->login_retry--;
 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-(fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
 return 0;
+if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+return 0;
+}
 /* for pure Target Mode. Login will not be initiated */
 if (vha->host->active_mode == MODE_TARGET)
 return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 fcport->flags);
 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-(fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
 return;
+if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+return;
+}
 if (fcport->flags & FCF_ASYNC_SENT) {
 fcport->login_retry++;
 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
 complete(&abt->u.abt.comp);
 }
-static int
+int
 qla24xx_async_abort_cmd(srb_t *cmd_sp)
 {
 scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 } else {
 ql_dbg(ql_dbg_init, vha, 0x00d3,
 "Init Firmware -- success.\n");
+ha->flags.fw_started = 1;
 }
 return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 uint8_t domain;
 char connect_type[22];
 struct qla_hw_data *ha = vha->hw;
-unsigned long flags;
 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+port_id_t id;
 /* Get host addresses. */
 rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 /* Save Host port and loop ID. */
 /* byte order - Big Endian */
-vha->d_id.b.domain = domain;
-vha->d_id.b.area = area;
-vha->d_id.b.al_pa = al_pa;
+id.b.domain = domain;
+id.b.area = area;
+id.b.al_pa = al_pa;
+id.b.rsvd_1 = 0;
-spin_lock_irqsave(&ha->vport_slock, flags);
-qlt_update_vp_map(vha, SET_AL_PA);
-spin_unlock_irqrestore(&ha->vport_slock, flags);
+qlt_update_host_map(vha, id);
 if (!vha->flags.init_done)
 ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 atomic_set(&vha->loop_state, LOOP_READY);
 ql_dbg(ql_dbg_disc, vha, 0x2069,
 "LOOP READY.\n");
+ha->flags.fw_init_done = 1;
 /*
  * Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 }
 }
 atomic_dec(&vha->vref_count);
+wake_up(&vha->vref_waitq);
 }
 spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 if (!(IS_P3P_TYPE(ha)))
 ha->isp_ops->reset_chip(vha);
+ha->flags.n2n_ae = 0;
+ha->flags.lip_ae = 0;
+ha->current_topology = 0;
+ha->flags.fw_started = 0;
+ha->flags.fw_init_done = 0;
 ha->chip_reset++;
 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 return;
 if (!ha->fw_major_version)
 return;
+if (!ha->flags.fw_started)
+return;
 ret = qla2x00_stop_firmware(vha);
 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 "Attempting retry of stop-firmware command.\n");
 ret = qla2x00_stop_firmware(vha);
 }
+ha->flags.fw_started = 0;
+ha->flags.fw_init_done = 0;
 }
 int
...
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 void *next_dsd;
 uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 struct scatterlist *sg_prot;
 uint32_t *cur_dsd = dsd;
 uint16_t used_dsds = tot_dsds;
 uint32_t prot_int; /* protection interval */
 uint32_t partial;
 struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 } else {
 list_add_tail(&dsd_ptr->list,
 &(tc->ctx->dsd_list));
-tc->ctx_dsd_alloced = 1;
+*tc->ctx_dsd_alloced = 1;
 }
@@ -1005,7 +1004,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 void *next_dsd;
 uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 } else {
 list_add_tail(&dsd_ptr->list,
 &(tc->ctx->dsd_list));
-tc->ctx_dsd_alloced = 1;
+*tc->ctx_dsd_alloced = 1;
 }
 /* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 void *next_dsd;
 uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 } else {
 list_add_tail(&dsd_ptr->list,
 &(tc->ctx->dsd_list));
-tc->ctx_dsd_alloced = 1;
+*tc->ctx_dsd_alloced = 1;
 }
 /* add new list to cmd iocb or last list */
...
@@ -708,6 +708,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 ha->isp_ops->fw_dump(vha, 1);
+ha->flags.fw_init_done = 0;
+ha->flags.fw_started = 0;
 if (IS_FWI2_CAPABLE(ha)) {
 if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 break;
 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ha->flags.lip_ae = 1;
+ha->flags.n2n_ae = 0;
 ql_dbg(ql_dbg_async, vha, 0x5009,
 "LIP occurred (%x).\n", mb[1]);
@@ -797,6 +802,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 break;
 case MBA_LOOP_DOWN: /* Loop Down Event */
+ha->flags.n2n_ae = 0;
+ha->flags.lip_ae = 0;
+ha->current_topology = 0;
 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
 ? RD_REG_WORD(&reg24->mailbox4) : 0;
 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 /* case MBA_DCBX_COMPLETE: */
 case MBA_POINT_TO_POINT: /* Point-to-Point */
+ha->flags.lip_ae = 0;
+ha->flags.n2n_ae = 1;
 if (IS_QLA2100(ha))
 break;
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 QLA_LOGIO_LOGIN_RETRIED : 0;
 if (logio->entry_status) {
 ql_log(ql_log_warn, fcport->vha, 0x5034,
-"Async-%s error entry - hdl=%x"
+"Async-%s error entry - %8phC hdl=%x"
 "portid=%02x%02x%02x entry-status=%x.\n",
-type, sp->handle, fcport->d_id.b.domain,
+type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
 fcport->d_id.b.area, fcport->d_id.b.al_pa,
 logio->entry_status);
 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
-"Async-%s complete - hdl=%x portid=%02x%02x%02x "
-"iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+"Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+"iop0=%x.\n", type, fcport->port_name, sp->handle,
+fcport->d_id.b.domain,
 fcport->d_id.b.area, fcport->d_id.b.al_pa,
 le32_to_cpu(logio->io_parameter[0]));
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 case LSC_SCODE_NPORT_USED:
 data[0] = MBS_LOOP_ID_USED;
 break;
+case LSC_SCODE_CMD_FAILED:
+if (iop[1] == 0x0606) {
+/*
+ * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+ * Target side acked.
+ */
+data[0] = MBS_COMMAND_COMPLETE;
+goto logio_done;
+}
+data[0] = MBS_COMMAND_ERROR;
+break;
 case LSC_SCODE_NOXCB:
 vha->hw->exch_starvation++;
 if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 }
 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
-"Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
-"iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+"Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+"iop0=%x iop1=%x.\n", type, fcport->port_name,
+sp->handle, fcport->d_id.b.domain,
 fcport->d_id.b.area, fcport->d_id.b.al_pa,
 le16_to_cpu(logio->comp_status),
 le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 return;
 abt = &sp->u.iocb_cmd;
-abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
 sp->done(sp, 0);
 }
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 struct sts_entry_24xx *pkt;
 struct qla_hw_data *ha = vha->hw;
-if (!vha->flags.online)
+if (!ha->flags.fw_started)
 return;
 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
...
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
  * ensures no active vp_list traversal while the vport is removed
  * from the queue)
  */
-spin_lock_irqsave(&ha->vport_slock, flags);
-while (atomic_read(&vha->vref_count)) {
-spin_unlock_irqrestore(&ha->vport_slock, flags);
-msleep(500);
+wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+10*HZ);
 spin_lock_irqsave(&ha->vport_slock, flags);
+if (atomic_read(&vha->vref_count)) {
+ql_dbg(ql_dbg_vport, vha, 0xfffa,
+"vha->vref_count=%u timeout\n", vha->vref_count.counter);
+vha->vref_count = (atomic_t)ATOMIC_INIT(0);
 }
 list_del(&vha->list);
 qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 spin_lock_irqsave(&ha->vport_slock, flags);
 atomic_dec(&vha->vref_count);
+wake_up(&vha->vref_waitq);
 }
 i++;
 }
...
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
 return atomic_read(&vha->loop_state) == LOOP_READY;
 }
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+struct scsi_qla_host *vha = container_of(work,
+struct scsi_qla_host, iocb_work);
+int cnt = 0;
+while (!list_empty(&vha->work_list)) {
+qla2x00_do_work(vha);
+cnt++;
+if (cnt > 10)
+break;
+}
+}
 /*
  * PCI driver interface
  */
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 */
 qla2xxx_wake_dpc(base_vha);
+INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
 qla2x00_free_sysfs_attr(base_vha, true);
 fc_remove_host(base_vha->host);
+qlt_remove_target_resources(ha);
 scsi_remove_host(base_vha->host);
@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 spin_lock_init(&vha->work_lock);
 spin_lock_init(&vha->cmd_list_lock);
 init_waitqueue_head(&vha->fcport_waitQ);
+init_waitqueue_head(&vha->vref_waitq);
 vha->gnl.size = sizeof(struct get_name_list_extended) *
 (ha->max_loop_id + 1);
@@ -4319,6 +4336,10 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 spin_lock_irqsave(&vha->work_lock, flags);
 list_add_tail(&e->list, &vha->work_list);
 spin_unlock_irqrestore(&vha->work_lock, flags);
+if (QLA_EARLY_LINKUP(vha->hw))
+schedule_work(&vha->iocb_work);
+else
 qla2xxx_wake_dpc(vha);
 return QLA_SUCCESS;
...
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
 atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
 }
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+return (be32_to_cpu(get_unaligned((uint32_t *)
+&atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 /*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
 unsigned char *, uint32_t, int, int, int);
 void (*handle_data)(struct qla_tgt_cmd *);
-void (*handle_dif_err)(struct qla_tgt_cmd *);
 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
 uint32_t);
 void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
 void (*clear_nacl_from_fcport_map)(struct fc_port *);
 void (*put_sess)(struct fc_port *);
 void (*shutdown_sess)(struct fc_port *);
+int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+int (*chk_dif_tags)(uint32_t tag);
+void (*add_target)(struct scsi_qla_host *);
 };
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -845,6 +855,7 @@ enum trace_flags {
 TRC_CMD_FREE = BIT_17,
 TRC_DATA_IN = BIT_18,
 TRC_ABORT = BIT_19,
+TRC_DIF_ERR = BIT_20,
 };
 struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
 unsigned int sg_mapped:1;
 unsigned int free_sg:1;
 unsigned int write_data_transferred:1;
-unsigned int ctx_dsd_alloced:1;
 unsigned int q_full:1;
 unsigned int term_exchg:1;
 unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
 struct list_head cmd_list;
 struct atio_from_isp atio;
-/* t10dif */
+uint8_t ctx_dsd_alloced;
+/* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+int8_t dif_err_code;
 struct scatterlist *prot_sg;
 uint32_t prot_sg_cnt;
-uint32_t blk_sz;
+uint32_t blk_sz, num_blks;
+uint8_t scsi_status, sense_key, asc, ascq;
 struct crc_context *ctx;
+uint8_t *cdb;
+uint64_t lba;
+uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+uint32_t a_ref_tag, e_ref_tag;
 uint64_t jiffies_at_alloc;
 uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
 extern void qlt_logo_completion_handler(fc_port_t *, int);
 extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+uint8_t, uint8_t, uint8_t);
 #endif /* __QLA_TARGET_H */
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION "8.07.00.38-k"
+#define QLA2XXX_VERSION "9.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MINOR_VER 0
 #define QLA_DRIVER_PATCH_VER 0
 #define QLA_DRIVER_BETA_VER 0
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 return;
 }
+switch (cmd->dif_err_code) {
+case DIF_ERR_GRD:
+cmd->se_cmd.pi_err =
+TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+break;
+case DIF_ERR_REF:
+cmd->se_cmd.pi_err =
+TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+break;
+case DIF_ERR_APP:
+cmd->se_cmd.pi_err =
+TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+break;
+case DIF_ERR_NONE:
+default:
+break;
+}
 if (cmd->se_cmd.pi_err)
 transport_generic_request_failure(&cmd->se_cmd,
 cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
 {
-struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-/* take an extra kref to prevent cmd free too early.
- * need to wait for SCSI status/check condition to
- * finish responding generate by transport_generic_request_failure.
- */
-kref_get(&cmd->se_cmd.cmd_kref);
-transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+return 0;
 }
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+uint16_t *pfw_prot_opts)
 {
-INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
-queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+struct se_cmd *se_cmd = &cmd->se_cmd;
+if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+*pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+*pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+return 0;
 }
 /*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 .handle_cmd = tcm_qla2xxx_handle_cmd,
 .handle_data = tcm_qla2xxx_handle_data,
-.handle_dif_err = tcm_qla2xxx_handle_dif_err,
 .handle_tmr = tcm_qla2xxx_handle_tmr,
 .free_cmd = tcm_qla2xxx_free_cmd,
 .free_mcmd = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
 .put_sess = tcm_qla2xxx_put_sess,
 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
+.get_dif_tags = tcm_qla2xxx_dif_tags,
+.chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
 };
 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
...
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
#include "target_core_ua.h" #include "target_core_ua.h"
static sense_reason_t core_alua_check_transition(int state, int valid, static sense_reason_t core_alua_check_transition(int state, int valid,
int *primary); int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state( static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun, int explicit, int offline); struct se_lun *lun, int explicit, int offline);
...@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) ...@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric * the state is a primary or secondary target port asymmetric
* access state. * access state.
*/ */
rc = core_alua_check_transition(alua_access_state, rc = core_alua_check_transition(alua_access_state, valid_states,
valid_states, &primary); &primary, 1);
if (rc) { if (rc) {
/* /*
* If the SET TARGET PORT GROUPS attempts to establish * If the SET TARGET PORT GROUPS attempts to establish
...@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd) ...@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0; return 0;
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0; return 0;
/* /*
...@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd) ...@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
* Check implicit and explicit ALUA state change request. * Check implicit and explicit ALUA state change request.
*/ */
static sense_reason_t static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary) core_alua_check_transition(int state, int valid, int *primary, int explicit)
{ {
/* /*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
...@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary) ...@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
*primary = 0; *primary = 0;
break; break;
case ALUA_ACCESS_STATE_TRANSITION: case ALUA_ACCESS_STATE_TRANSITION:
if (!(valid & ALUA_T_SUP) || explicit)
/* /*
* Transitioning is set internally, and * Transitioning is set internally and by tcmu daemon,
* cannot be selected manually. * and cannot be selected through a STPG.
*/ */
goto not_supported; goto not_supported;
*primary = 0;
break;
default: default:
pr_err("Unknown ALUA access state: 0x%02x\n", state); pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST; return TCM_INVALID_PARAMETER_LIST;
...@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) ...@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work) static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{ {
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
...@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt( ...@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0; return 0;
if (new_state == ALUA_ACCESS_STATE_TRANSITION) if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN; return -EAGAIN;
/* /*
* Flush any pending transitions * Flush any pending transitions
*/ */
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && if (!explicit)
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
ALUA_ACCESS_STATE_TRANSITION) {
/* Just in case */
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
return 0;
}
/* /*
* Save the old primary ALUA access state, and set the current state * Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION. * to ALUA_ACCESS_STATE_TRANSITION.
*/ */
tg_pt_gp->tg_pt_gp_alua_previous_state =
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION); ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
     core_alua_queue_state_change_ua(tg_pt_gp);
+    if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+        return 0;
+    tg_pt_gp->tg_pt_gp_alua_previous_state =
+        atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+    tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
     /*
      * Check for the optional ALUA primary state transition delay
      */
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
     atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
     spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-    if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-        unsigned long transition_tmo;
-        transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-        queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                   &tg_pt_gp->tg_pt_gp_transition_work,
-                   transition_tmo);
-    } else {
+    schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+    if (explicit) {
         tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-        queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                   &tg_pt_gp->tg_pt_gp_transition_work, 0);
         wait_for_completion(&wait);
         tg_pt_gp->tg_pt_gp_transition_complete = NULL;
     }
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
     struct t10_alua_tg_pt_gp *tg_pt_gp;
     int primary, valid_states, rc = 0;
+    if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+        return -ENODEV;
     valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-    if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+    if (core_alua_check_transition(new_state, valid_states, &primary,
+                explicit) != 0)
         return -EINVAL;
     local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,7 +1688,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
     mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
     spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
     atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-    INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+    INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
           core_alua_do_transition_tg_pt_work);
     tg_pt_gp->tg_pt_gp_dev = dev;
     atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
     dev->t10_alua.alua_tg_pt_gps_counter--;
     spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-    flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+    flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
     /*
      * Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
     unsigned char buf[TG_PT_GROUP_NAME_BUF];
     int move = 0;
-    if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+    if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
         (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
         return -ENODEV;
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
     unsigned long tmp;
     int ret;
-    if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+    if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
         (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
         return -ENODEV;
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
 int core_setup_alua(struct se_device *dev)
 {
-    if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+    if (!(dev->transport->transport_flags &
+          TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
         !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
         struct t10_alua_lu_gp_member *lu_gp_mem;
......
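Editorial note: the target_core_alua.c hunks above replace the delayed-work based transition with a plain work item. An explicit STPG that asks for the TRANSITION state is now rejected with -EAGAIN, an implicit request first flushes any transition already in flight, the group is marked TRANSITION, and only requests for a final (non-TRANSITION) state schedule the worker, with explicit callers waiting on the completion. The sketch below is a hypothetical userspace model of that decision flow only; the enum, do_transition() and the return convention are illustrative stand-ins, not kernel API.

    /* Hypothetical userspace model of the reworked decision flow in
     * core_alua_do_transition_tg_pt() above; names are illustrative
     * stand-ins, not kernel API. */
    #include <stdio.h>

    enum alua_state { OPTIMIZED, NON_OPTIMIZED, STANDBY, UNAVAILABLE, TRANSITION };

    /* rc -1 stands for -EAGAIN; *queue and *wait report whether the kernel
     * code would schedule_work() and wait_for_completion() next. */
    static int do_transition(enum alua_state cur, enum alua_state new_state,
                             int explicit, int *queue, int *wait)
    {
        *queue = 0;
        *wait = 0;

        if (cur == new_state)
            return 0;               /* already there, nothing to do */
        if (explicit && new_state == TRANSITION)
            return -1;              /* explicit STPG may not ask for TRANSITION */
        /* implicit requests first flush any transition still in flight,
         * then the group state becomes TRANSITION and a UA is queued */
        if (new_state == TRANSITION)
            return 0;               /* userspace only wanted TRANSITION */
        *queue = 1;                 /* kick the transition worker */
        *wait = explicit;           /* explicit STPG waits for the worker */
        return 0;
    }

    int main(void)
    {
        int q, w, rc;

        rc = do_transition(OPTIMIZED, STANDBY, 0, &q, &w);
        printf("implicit OPTIMIZED->STANDBY:    rc=%d queue=%d wait=%d\n", rc, q, w);
        rc = do_transition(OPTIMIZED, TRANSITION, 1, &q, &w);
        printf("explicit OPTIMIZED->TRANSITION: rc=%d\n", rc);
        return 0;
    }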
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
         pr_err("Missing tfo->aborted_task()\n");
         return -EINVAL;
     }
+    if (!tfo->check_stop_free) {
+        pr_err("Missing tfo->check_stop_free()\n");
+        return -EINVAL;
+    }
     /*
      * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
      * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
......
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
     buf = kzalloc(12, GFP_KERNEL);
     if (!buf)
-        return;
+        goto out_free;
     memset(cdb, 0, MAX_COMMAND_SIZE);
     cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
      * If MODE_SENSE still returns zero, set the default value to 1024.
      */
     sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
     if (!sdev->sector_size)
         sdev->sector_size = 1024;
-out_free:
     kfree(buf);
 }
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
             sd->lun, sd->queue_depth);
     }
-    dev->dev_attrib.hw_block_size = sd->sector_size;
+    dev->dev_attrib.hw_block_size =
+        min_not_zero((int)sd->sector_size, 512);
     dev->dev_attrib.hw_max_sectors =
-        min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+        min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
     dev->dev_attrib.hw_queue_depth = sd->queue_depth;
     /*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
     /*
      * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
      */
-    if (sd->type == TYPE_TAPE)
+    if (sd->type == TYPE_TAPE) {
         pscsi_tape_read_blocksize(dev, sd);
+        dev->dev_attrib.hw_block_size = sd->sector_size;
+    }
     return 0;
 }
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
     __releases(sh->host_lock)
 {
     struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
     return 0;
 }
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-        struct scsi_device *sd)
-    __releases(sh->host_lock)
-{
-    struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-    struct Scsi_Host *sh = sd->host;
-    int ret;
-    spin_unlock_irq(sh->host_lock);
-    ret = pscsi_add_device_to_list(dev, sd);
-    if (ret)
-        return ret;
-    pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-        phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-        sd->channel, sd->id, sd->lun);
-    return 0;
-}
 static int pscsi_configure_device(struct se_device *dev)
 {
     struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
     case TYPE_DISK:
         ret = pscsi_create_type_disk(dev, sd);
         break;
-    case TYPE_ROM:
-        ret = pscsi_create_type_rom(dev, sd);
-        break;
     default:
-        ret = pscsi_create_type_other(dev, sd);
+        ret = pscsi_create_type_nondisk(dev, sd);
         break;
     }
@@ -611,7 +590,6 @@ static void pscsi_free_device(struct se_device *dev)
         else if (pdv->pdv_lld_host)
             scsi_host_put(pdv->pdv_lld_host);
-        if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-            scsi_device_put(sd);
+        scsi_device_put(sd);
         pdv->pdv_sd = NULL;
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
     if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
         return pdv->pdv_bd->bd_part->nr_sects;
-    dump_stack();
     return 0;
 }
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
 static const struct target_backend_ops pscsi_ops = {
     .name = "pscsi",
     .owner = THIS_MODULE,
-    .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+    .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
+                       TRANSPORT_FLAG_PASSTHROUGH_ALUA,
     .attach_hba = pscsi_attach_hba,
     .detach_hba = pscsi_detach_hba,
     .pmode_enable_hba = pscsi_pmode_enable_hba,
......
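Editorial note: the pscsi hunks above make the TYPE_TAPE block-size probe fall back to 1024 even when the MODE SENSE buffer cannot be allocated or the command fails (the out_free label now sits before the fallback), and they keep hw_block_size and hw_max_sectors from ending up zero by using min_not_zero(). Below is a minimal userspace sketch of the same arithmetic, assuming the usual MODE SENSE(6) reply layout (24-bit block length in bytes 9-11 of the 12-byte buffer); min_not_zero() is re-implemented here purely for illustration.

    /* Userspace sketch of the block-size handling above; the buffer layout
     * and the 1024-byte fallback mirror pscsi_tape_read_blocksize(), the
     * min_not_zero() macro is a local re-implementation. */
    #include <stdio.h>

    #define min_not_zero(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

    static unsigned int tape_block_size(const unsigned char *buf)
    {
        unsigned int sz = 0;

        if (buf)        /* block descriptor: 24-bit block length */
            sz = (buf[9] << 16) | (buf[10] << 8) | buf[11];
        if (!sz)        /* allocation/command failure or a zero answer */
            sz = 1024;
        return sz;
    }

    int main(void)
    {
        unsigned char mode_sense[12] = { 0 };

        mode_sense[10] = 0x02;  /* 0x000200 = 512-byte blocks */
        printf("probed:        %u\n", tape_block_size(mode_sense));
        printf("fallback:      %u\n", tape_block_size(NULL));
        /* non-tape devices can no longer report a zero hw_block_size */
        printf("hw_block_size: %u\n", min_not_zero(0u, 512u));
        return 0;
    }

With the TYPE_TAPE branch above, hw_block_size is then copied from the probed sector size, which the fallback guarantees is never zero.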
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
             return ret;
         break;
     case VERIFY:
+    case VERIFY_16:
         size = 0;
-        sectors = transport_get_sectors_10(cdb);
-        cmd->t_task_lba = transport_lba_32(cdb);
+        if (cdb[0] == VERIFY) {
+            sectors = transport_get_sectors_10(cdb);
+            cmd->t_task_lba = transport_lba_32(cdb);
+        } else {
+            sectors = transport_get_sectors_16(cdb);
+            cmd->t_task_lba = transport_lba_64(cdb);
+        }
         cmd->execute_cmd = sbc_emulate_noop;
         goto check_lba;
     case REZERO_UNIT:
......
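Editorial note: the sbc_parse_cdb() hunk above routes VERIFY_16 through the same no-op emulation path as VERIFY, pulling the LBA and sector count with the 16-byte helpers instead of the 10-byte ones. The sketch below shows what those helpers extract, assuming the standard SBC CDB layout (VERIFY(10): 32-bit LBA at bytes 2-5, 16-bit length at bytes 7-8; VERIFY(16): 64-bit LBA at bytes 2-9, 32-bit length at bytes 10-13); get_be() and parse_verify() are local stand-ins, not the kernel functions.

    /* Userspace sketch of the CDB fields behind transport_lba_32/64() and
     * transport_get_sectors_10/16() for the two VERIFY opcodes above. */
    #include <stdint.h>
    #include <stdio.h>

    #define VERIFY_10 0x2f
    #define VERIFY_16 0x8f

    static uint64_t get_be(const uint8_t *p, int n)   /* big-endian field */
    {
        uint64_t v = 0;

        while (n--)
            v = (v << 8) | *p++;
        return v;
    }

    static void parse_verify(const uint8_t *cdb, uint64_t *lba, uint32_t *sectors)
    {
        if (cdb[0] == VERIFY_10) {
            *lba = get_be(&cdb[2], 4);                 /* 32-bit LBA   */
            *sectors = (uint32_t)get_be(&cdb[7], 2);   /* 16-bit count */
        } else {                                       /* VERIFY_16    */
            *lba = get_be(&cdb[2], 8);                 /* 64-bit LBA   */
            *sectors = (uint32_t)get_be(&cdb[10], 4);  /* 32-bit count */
        }
    }

    int main(void)
    {
        uint8_t cdb16[16] = { VERIFY_16, 0, 0, 0, 0, 0,
                              0x12, 0x34, 0x56, 0x78, 0, 0, 0, 8, 0, 0 };
        uint64_t lba;
        uint32_t sectors;

        parse_verify(cdb16, &lba, &sectors);
        printf("lba=0x%llx sectors=%u\n", (unsigned long long)lba, sectors);
        return 0;
    }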
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
     if (ret)
         goto out_kill_ref;
-    if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+    if (!(dev->transport->transport_flags &
+          TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
         !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
         target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
......
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
      * Fabric modules are expected to return '1' here if the se_cmd being
      * passed is released at this point, or zero if not being released.
      */
-    return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
-        : 0;
+    return cmd->se_tfo->check_stop_free(cmd);
 }
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
......
@@ -28,6 +28,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
+#include <linux/configfs.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
     spinlock_t commands_lock;
     struct timer_list timeout;
+    unsigned int cmd_time_out;
     char dev_config[TCMU_CONFIG_LEN];
 };
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
     tcmu_cmd->se_cmd = se_cmd;
     tcmu_cmd->tcmu_dev = udev;
-    tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+    if (udev->cmd_time_out)
+        tcmu_cmd->deadline = jiffies +
+            msecs_to_jiffies(udev->cmd_time_out);
     idr_preload(GFP_KERNEL);
     spin_lock_irq(&udev->commands_lock);
@@ -451,6 +455,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         pr_debug("sleeping for ring space\n");
         spin_unlock_irq(&udev->cmdr_lock);
-        ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+        if (udev->cmd_time_out)
+            ret = schedule_timeout(
+                    msecs_to_jiffies(udev->cmd_time_out));
+        else
+            ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
         finish_wait(&udev->wait_cmdr, &__wait);
         if (!ret) {
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
     /* TODO: only if FLUSH and FUA? */
     uio_event_notify(&udev->uio_info);
-    mod_timer(&udev->timeout,
-        round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+    if (udev->cmd_time_out)
+        mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+            msecs_to_jiffies(udev->cmd_time_out)));
     return TCM_NO_SENSE;
 }
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
     }
     udev->hba = hba;
+    udev->cmd_time_out = TCMU_TIME_OUT;
     init_waitqueue_head(&udev->wait_cmdr);
     spin_lock_init(&udev->cmdr_lock);
@@ -960,6 +970,7 @@ static int tcmu_configure_device(struct se_device *dev)
     if (dev->dev_attrib.hw_block_size == 0)
         dev->dev_attrib.hw_block_size = 512;
     /* Other attributes can be configured in userspace */
-    dev->dev_attrib.hw_max_sectors = 128;
+    if (!dev->dev_attrib.hw_max_sectors)
+        dev->dev_attrib.hw_max_sectors = 128;
     dev->dev_attrib.hw_queue_depth = 128;
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
     kfree(udev);
 }
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+    return udev->uio_info.uio_dev ? true : false;
+}
 static void tcmu_free_device(struct se_device *dev)
 {
     struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
     spin_unlock_irq(&udev->commands_lock);
     WARN_ON(!all_expired);
-    /* Device was configured */
-    if (udev->uio_info.uio_dev) {
+    if (tcmu_dev_configured(udev)) {
         tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
                    udev->uio_info.uio_dev->minor);
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
 }
 enum {
-    Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+    Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+    Opt_err,
 };
 static match_table_t tokens = {
     {Opt_dev_config, "dev_config=%s"},
     {Opt_dev_size, "dev_size=%u"},
     {Opt_hw_block_size, "hw_block_size=%u"},
+    {Opt_hw_max_sectors, "hw_max_sectors=%u"},
     {Opt_err, NULL}
 };
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+    unsigned long tmp_ul;
+    char *arg_p;
+    int ret;
+    arg_p = match_strdup(arg);
+    if (!arg_p)
+        return -ENOMEM;
+    ret = kstrtoul(arg_p, 0, &tmp_ul);
+    kfree(arg_p);
+    if (ret < 0) {
+        pr_err("kstrtoul() failed for dev attrib\n");
+        return ret;
+    }
+    if (!tmp_ul) {
+        pr_err("dev attrib must be nonzero\n");
+        return -EINVAL;
+    }
+    *dev_attrib = tmp_ul;
+    return 0;
+}
 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
         const char *page, ssize_t count)
 {
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
     char *orig, *ptr, *opts, *arg_p;
     substring_t args[MAX_OPT_ARGS];
     int ret = 0, token;
-    unsigned long tmp_ul;
     opts = kstrdup(page, GFP_KERNEL);
     if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                 pr_err("kstrtoul() failed for dev_size=\n");
             break;
         case Opt_hw_block_size:
-            arg_p = match_strdup(&args[0]);
-            if (!arg_p) {
-                ret = -ENOMEM;
-                break;
-            }
-            ret = kstrtoul(arg_p, 0, &tmp_ul);
-            kfree(arg_p);
-            if (ret < 0) {
-                pr_err("kstrtoul() failed for hw_block_size=\n");
-                break;
-            }
-            if (!tmp_ul) {
-                pr_err("hw_block_size must be nonzero\n");
-                break;
-            }
-            dev->dev_attrib.hw_block_size = tmp_ul;
+            ret = tcmu_set_dev_attrib(&args[0],
+                    &(dev->dev_attrib.hw_block_size));
+            break;
+        case Opt_hw_max_sectors:
+            ret = tcmu_set_dev_attrib(&args[0],
+                    &(dev->dev_attrib.hw_max_sectors));
             break;
         default:
             break;
         }
+        if (ret)
+            break;
     }
     kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
     return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
 }
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+    struct se_dev_attrib *da = container_of(to_config_group(item),
+                    struct se_dev_attrib, da_group);
+    struct tcmu_dev *udev = container_of(da->da_dev,
+                    struct tcmu_dev, se_dev);
+    return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+                    size_t count)
+{
+    struct se_dev_attrib *da = container_of(to_config_group(item),
+                    struct se_dev_attrib, da_group);
+    struct tcmu_dev *udev = container_of(da->da_dev,
+                    struct tcmu_dev, se_dev);
+    u32 val;
+    int ret;
+    if (da->da_dev->export_count) {
+        pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+        return -EINVAL;
+    }
+    ret = kstrtou32(page, 0, &val);
+    if (ret < 0)
+        return ret;
+    if (!val) {
+        pr_err("Illegal value for cmd_time_out\n");
+        return -EINVAL;
+    }
+    udev->cmd_time_out = val * MSEC_PER_SEC;
+    return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+static struct configfs_attribute **tcmu_attrs;
+static struct target_backend_ops tcmu_ops = {
     .name = "user",
     .owner = THIS_MODULE,
     .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
     .show_configfs_dev_params = tcmu_show_configfs_dev_params,
     .get_device_type = sbc_get_device_type,
     .get_blocks = tcmu_get_blocks,
-    .tb_dev_attrib_attrs = passthrough_attrib_attrs,
+    .tb_dev_attrib_attrs = NULL,
 };
 static int __init tcmu_module_init(void)
 {
-    int ret;
+    int ret, i, len = 0;
     BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
         goto out_unreg_device;
     }
+    for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+        len += sizeof(struct configfs_attribute *);
+    }
+    len += sizeof(struct configfs_attribute *) * 2;
+    tcmu_attrs = kzalloc(len, GFP_KERNEL);
+    if (!tcmu_attrs) {
+        ret = -ENOMEM;
+        goto out_unreg_genl;
+    }
+    for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+        tcmu_attrs[i] = passthrough_attrib_attrs[i];
+    }
+    tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+    tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
     ret = transport_backend_register(&tcmu_ops);
     if (ret)
-        goto out_unreg_genl;
+        goto out_attrs;
     return 0;
+out_attrs:
+    kfree(tcmu_attrs);
 out_unreg_genl:
     genl_unregister_family(&tcmu_genl_family);
 out_unreg_device:
@@ -1194,6 +1287,7 @@ static int __init tcmu_module_init(void)
 static void __exit tcmu_module_exit(void)
 {
     target_backend_unregister(&tcmu_ops);
+    kfree(tcmu_attrs);
     genl_unregister_family(&tcmu_genl_family);
     root_device_unregister(tcmu_root_device);
     kmem_cache_destroy(tcmu_cmd_cache);
......
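Editorial note: the target_core_user.c hunks above turn the command timeout into a per-device backend attribute. cmd_time_out is stored in milliseconds, shown and set in seconds, rejects zero, and may not be changed while the device is exported. Because part of the attribute table is now built at runtime, tcmu_module_init() copies the NULL-terminated passthrough_attrib_attrs[] array into a freshly allocated table and appends the new attribute, reserving one extra slot as the terminating NULL (the "+ 2"). Below is a userspace sketch of that copy-and-append pattern; the attr type and the attribute names are made up for the example, not kernel structures.

    /* Userspace sketch of the copy-and-append attribute table built in
     * tcmu_module_init() above; all names here are stand-ins. */
    #include <stdio.h>
    #include <stdlib.h>

    struct attr { const char *name; };

    static struct attr attr_hw_block_size = { "hw_block_size" };
    static struct attr attr_hw_max_sectors = { "hw_max_sectors" };
    static struct attr *base_attrs[] = {
        &attr_hw_block_size, &attr_hw_max_sectors, NULL
    };
    static struct attr attr_cmd_time_out = { "cmd_time_out" };

    int main(void)
    {
        size_t i, n = 0;
        struct attr **attrs;

        while (base_attrs[n])                   /* count inherited attributes */
            n++;
        attrs = calloc(n + 2, sizeof(*attrs));  /* + new attribute + NULL */
        if (!attrs)
            return 1;
        for (i = 0; i < n; i++)
            attrs[i] = base_attrs[i];
        attrs[n] = &attr_cmd_time_out;          /* attrs[n + 1] stays NULL */

        for (i = 0; attrs[i]; i++)
            printf("%s\n", attrs[i]->name);
        free(attrs);
        return 0;
    }

Assuming the usual configfs layout for target backstores, the new knob should then appear as an attrib/cmd_time_out file next to the inherited hw_* attributes of each tcm-user device.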
@@ -4,7 +4,12 @@
 #include <linux/types.h>
 #include <target/target_core_base.h>
-#define TRANSPORT_FLAG_PASSTHROUGH		1
+#define TRANSPORT_FLAG_PASSTHROUGH		0x1
+/*
+ * ALUA commands, state checks and setup operations are handled by the
+ * backend module.
+ */
+#define TRANSPORT_FLAG_PASSTHROUGH_ALUA		0x2
 struct request_queue;
 struct scatterlist;
......
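Editorial note: the header change above turns the passthrough flag into a bit mask and adds TRANSPORT_FLAG_PASSTHROUGH_ALUA, so a backend can keep CDB passthrough while choosing whether target core handles ALUA for it. In this series pscsi sets both bits, while tcm-user keeps only TRANSPORT_FLAG_PASSTHROUGH, so tcm-user devices now get the target-core ALUA setup that the plain PASSTHROUGH flag used to skip. A small illustration of the check that core_setup_alua() and core_tpg_add_lun() now apply; the backend table here is made up for the example.

    /* Minimal illustration of the flag split above; the flag values mirror
     * the header, the backend table is invented for the example. */
    #include <stdio.h>

    #define TRANSPORT_FLAG_PASSTHROUGH      0x1
    #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2

    struct backend { const char *name; unsigned int flags; };

    int main(void)
    {
        struct backend backends[] = {
            { "pscsi", TRANSPORT_FLAG_PASSTHROUGH | TRANSPORT_FLAG_PASSTHROUGH_ALUA },
            { "user",  TRANSPORT_FLAG_PASSTHROUGH },
        };

        for (unsigned int i = 0; i < 2; i++) {
            /* the test core_setup_alua() and core_tpg_add_lun() now apply */
            int core_alua = !(backends[i].flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);

            printf("%s: target-core ALUA %s\n", backends[i].name,
                   core_alua ? "set up by target core" : "left to the backend");
        }
        return 0;
    }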
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
     struct list_head tg_pt_gp_lun_list;
     struct se_lun *tg_pt_gp_alua_lun;
     struct se_node_acl *tg_pt_gp_alua_nacl;
-    struct delayed_work tg_pt_gp_transition_work;
+    struct work_struct tg_pt_gp_transition_work;
     struct completion *tg_pt_gp_transition_complete;
 };
......