Commit adf653f9 authored by Christoph Hellwig, committed by Nicholas Bellinger

target: Subsume se_port + t10_alua_tg_pt_gp_member into se_lun

This patch eliminates all se_port + t10_alua_tg_pt_gp_member usage
and converts the current users to direct se_lun pointer dereferences.

This includes the removal of core_export_port(), core_release_port(),
core_dev_export() and core_dev_unexport(), along with the conversion of
the special-case se_lun pointer dereferences within PR ALL_TG_PT=1 and
ALUA access state transition UNIT_ATTENTION handling.

Also, update core_enable_device_list_for_node() to reference the new
per-LUN se_lun->lun_deve_list when creating a new entry, or when
replacing an existing one via RCU.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent b3eeea66
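The core of the change is that each exported se_lun now carries its own ALUA target port group linkage (lun_tg_pt_gp, lun_tg_pt_gp_link, lun_tg_pt_gp_lock) and is linked into the group's tg_pt_gp_lun_list, replacing the old t10_alua_tg_pt_gp_member indirection that hung off struct se_port. The new helpers target_attach_tg_pt_gp() and target_detach_tg_pt_gp() are only declared in the header hunk below; their bodies are not visible on this page, so the following is just a minimal sketch, assuming the field and lock names shown in the hunks, of what attach/detach amount to:

/*
 * Minimal sketch only -- not the patch's actual implementation.  It assumes
 * the se_lun / t10_alua_tg_pt_gp fields introduced by this commit and mirrors
 * the locking the readers in this diff use (tg_pt_gp->tg_pt_gp_lock for the
 * LUN list, lun->lun_tg_pt_gp_lock for the back-pointer).
 */
void target_attach_tg_pt_gp(struct se_lun *lun, struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        spin_lock(&lun->lun_tg_pt_gp_lock);
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        lun->lun_tg_pt_gp = tg_pt_gp;
        list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        spin_unlock(&lun->lun_tg_pt_gp_lock);
}

void target_detach_tg_pt_gp(struct se_lun *lun)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        if (tg_pt_gp) {
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_del_init(&lun->lun_tg_pt_gp_link);
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
                lun->lun_tg_pt_gp = NULL;
        }
        spin_unlock(&lun->lun_tg_pt_gp_lock);
}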
[diff for one file is collapsed and not shown]
@@ -85,7 +85,6 @@
 extern struct kmem_cache *t10_alua_lu_gp_cache;
 extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
-extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 extern struct kmem_cache *t10_alua_lba_map_cache;
 extern struct kmem_cache *t10_alua_lba_map_mem_cache;
@@ -94,7 +93,7 @@ extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
 extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
-                struct se_device *, struct se_port *,
+                struct se_device *, struct se_lun *,
                 struct se_node_acl *, int, int);
 extern char *core_alua_dump_status(int);
 extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
@@ -117,14 +116,11 @@ extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
                 struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
-extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
-                struct se_port *);
 extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
-extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
-extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
-                struct t10_alua_tg_pt_gp *);
-extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
-extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+extern void target_detach_tg_pt_gp(struct se_lun *);
+extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *,
                 size_t);
 extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
 extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
...
@@ -2889,21 +2889,16 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
         struct t10_alua_tg_pt_gp *tg_pt_gp,
         char *page)
 {
-        struct se_port *port;
-        struct se_portal_group *tpg;
         struct se_lun *lun;
-        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
         ssize_t len = 0, cur_len;
         unsigned char buf[TG_PT_GROUP_NAME_BUF];
         memset(buf, 0, TG_PT_GROUP_NAME_BUF);
         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-        list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
-                        tg_pt_gp_mem_list) {
-                port = tg_pt_gp_mem->tg_pt;
-                tpg = port->sep_tpg;
-                lun = port->sep_lun;
+        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+                        lun_tg_pt_gp_link) {
+                struct se_portal_group *tpg = lun->lun_tpg;
                 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
                         "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
...
@@ -120,8 +120,8 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                     (se_cmd->data_direction != DMA_NONE))
                         return TCM_WRITE_PROTECTED;
 
-                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
-                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+                se_lun = se_sess->se_tpg->tpg_virt_lun0;
+                se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                 se_cmd->orig_fe_lun = 0;
                 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
@@ -309,7 +309,6 @@ int core_enable_device_list_for_node(
         struct se_node_acl *nacl,
         struct se_portal_group *tpg)
 {
-        struct se_port *port = lun->lun_sep;
         struct se_dev_entry *orig, *new;
 
         new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -320,8 +319,8 @@ int core_enable_device_list_for_node(
         atomic_set(&new->ua_count, 0);
         spin_lock_init(&new->ua_lock);
-        INIT_LIST_HEAD(&new->alua_port_list);
         INIT_LIST_HEAD(&new->ua_list);
+        INIT_LIST_HEAD(&new->lun_link);
         new->mapped_lun = mapped_lun;
         kref_init(&new->pr_kref);
@@ -357,10 +356,10 @@ int core_enable_device_list_for_node(
                 hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
                 mutex_unlock(&nacl->lun_entry_mutex);
 
-                spin_lock_bh(&port->sep_alua_lock);
-                list_del(&orig->alua_port_list);
-                list_add_tail(&new->alua_port_list, &port->sep_alua_list);
-                spin_unlock_bh(&port->sep_alua_lock);
+                spin_lock_bh(&lun->lun_deve_lock);
+                list_del(&orig->lun_link);
+                list_add_tail(&new->lun_link, &lun->lun_deve_list);
+                spin_unlock_bh(&lun->lun_deve_lock);
 
                 kref_put(&orig->pr_kref, target_pr_kref_release);
                 wait_for_completion(&orig->pr_comp);
@@ -374,9 +373,9 @@ int core_enable_device_list_for_node(
         hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
         mutex_unlock(&nacl->lun_entry_mutex);
 
-        spin_lock_bh(&port->sep_alua_lock);
-        list_add_tail(&new->alua_port_list, &port->sep_alua_list);
-        spin_unlock_bh(&port->sep_alua_lock);
+        spin_lock_bh(&lun->lun_deve_lock);
+        list_add_tail(&new->lun_link, &lun->lun_deve_list);
+        spin_unlock_bh(&lun->lun_deve_lock);
 
         return 0;
 }
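With the hunks above, every struct se_dev_entry that maps this LUN now hangs off the LUN's own lun_deve_list (linked through deve->lun_link and protected by lun->lun_deve_lock) instead of the old port->sep_alua_list. A minimal sketch of a reader of that list, in the style the PR ALL_TG_PT=1 and ALUA UNIT ATTENTION paths use later in this commit; the helper name is made up for illustration:

/* Illustrative only: iterate the per-LUN se_dev_entry list under its lock. */
static void example_for_each_deve(struct se_lun *lun)
{
        struct se_dev_entry *deve;

        spin_lock_bh(&lun->lun_deve_lock);
        list_for_each_entry(deve, &lun->lun_deve_list, lun_link) {
                /* deve->se_lun_acl is NULL for demo-mode mappings */
                pr_debug("mapped_lun: %u\n", deve->mapped_lun);
        }
        spin_unlock_bh(&lun->lun_deve_lock);
}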
@@ -390,23 +389,22 @@ void core_disable_device_list_for_node(
         struct se_node_acl *nacl,
         struct se_portal_group *tpg)
 {
-        struct se_port *port = lun->lun_sep;
         /*
          * If the MappedLUN entry is being disabled, the entry in
-         * port->sep_alua_list must be removed now before clearing the
+         * lun->lun_deve_list must be removed now before clearing the
          * struct se_dev_entry pointers below as logic in
          * core_alua_do_transition_tg_pt() depends on these being present.
          *
          * deve->se_lun_acl will be NULL for demo-mode created LUNs
          * that have not been explicitly converted to MappedLUNs ->
-         * struct se_lun_acl, but we remove deve->alua_port_list from
-         * port->sep_alua_list. This also means that active UAs and
+         * struct se_lun_acl, but we remove deve->lun_link from
+         * lun->lun_deve_list. This also means that active UAs and
          * NodeACL context specific PR metadata for demo-mode
          * MappedLUN *deve will be released below..
          */
-        spin_lock_bh(&port->sep_alua_lock);
-        list_del(&orig->alua_port_list);
-        spin_unlock_bh(&port->sep_alua_lock);
+        spin_lock_bh(&lun->lun_deve_lock);
+        list_del(&orig->lun_link);
+        spin_unlock_bh(&lun->lun_deve_lock);
         /*
          * Disable struct se_dev_entry LUN ACL mapping
          */
@@ -458,27 +456,16 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
         mutex_unlock(&tpg->acl_node_mutex);
 }
 
-static struct se_port *core_alloc_port(struct se_device *dev)
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
 {
-        struct se_port *port, *port_tmp;
-
-        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
-        if (!port) {
-                pr_err("Unable to allocate struct se_port\n");
-                return ERR_PTR(-ENOMEM);
-        }
-        INIT_LIST_HEAD(&port->sep_alua_list);
-        INIT_LIST_HEAD(&port->sep_list);
-        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
-        spin_lock_init(&port->sep_alua_lock);
-        mutex_init(&port->sep_tg_pt_md_mutex);
+        struct se_lun *tmp;
 
         spin_lock(&dev->se_port_lock);
-        if (dev->dev_port_count == 0x0000ffff) {
+        if (dev->export_count == 0x0000ffff) {
                 pr_warn("Reached dev->dev_port_count =="
                         " 0x0000ffff\n");
                 spin_unlock(&dev->se_port_lock);
-                return ERR_PTR(-ENOSPC);
+                return -ENOSPC;
         }
 again:
         /*
@@ -493,135 +480,23 @@ static struct se_port *core_alloc_port(struct se_device *dev)
          *    2h          Relative port 2, historically known as port B
          *    3h to FFFFh Relative port 3 through 65 535
          */
-        port->sep_rtpi = dev->dev_rpti_counter++;
-        if (!port->sep_rtpi)
+        lun->lun_rtpi = dev->dev_rpti_counter++;
+        if (!lun->lun_rtpi)
                 goto again;
 
-        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+        list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
                 /*
                  * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                  * for 16-bit wrap..
                  */
-                if (port->sep_rtpi == port_tmp->sep_rtpi)
+                if (lun->lun_rtpi == tmp->lun_rtpi)
                         goto again;
         }
         spin_unlock(&dev->se_port_lock);
 
-        return port;
-}
-
-static void core_export_port(
-        struct se_device *dev,
-        struct se_portal_group *tpg,
-        struct se_port *port,
-        struct se_lun *lun)
-{
-        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
-
-        spin_lock(&dev->se_port_lock);
-        spin_lock(&lun->lun_sep_lock);
-        port->sep_tpg = tpg;
-        port->sep_lun = lun;
-        lun->lun_sep = port;
-        spin_unlock(&lun->lun_sep_lock);
-        list_add_tail(&port->sep_list, &dev->dev_sep_list);
-        spin_unlock(&dev->se_port_lock);
-
-        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
-            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
-                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
-                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
-                        pr_err("Unable to allocate t10_alua_tg_pt"
-                                        "_gp_member_t\n");
-                        return;
-                }
-                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                        dev->t10_alua.default_tg_pt_gp);
-                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-                pr_debug("%s/%s: Adding to default ALUA Target Port"
-                        " Group: alua/default_tg_pt_gp\n",
-                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
-        }
-
-        dev->dev_port_count++;
-        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
-}
-
-/*
- *      Called with struct se_device->se_port_lock spinlock held.
- */
-static void core_release_port(struct se_device *dev, struct se_port *port)
-        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
-{
-        /*
-         * Wait for any port reference for PR ALL_TG_PT=1 operation
-         * to complete in __core_scsi3_alloc_registration()
-         */
-        spin_unlock(&dev->se_port_lock);
-        if (atomic_read(&port->sep_tg_pt_ref_cnt))
-                cpu_relax();
-        spin_lock(&dev->se_port_lock);
-
-        core_alua_free_tg_pt_gp_mem(port);
-        list_del(&port->sep_list);
-        dev->dev_port_count--;
-        kfree(port);
-}
-
-int core_dev_export(
-        struct se_device *dev,
-        struct se_portal_group *tpg,
-        struct se_lun *lun)
-{
-        struct se_hba *hba = dev->se_hba;
-        struct se_port *port;
-
-        port = core_alloc_port(dev);
-        if (IS_ERR(port))
-                return PTR_ERR(port);
-
-        lun->lun_index = dev->dev_index;
-        lun->lun_se_dev = dev;
-        lun->lun_rtpi = port->sep_rtpi;
-
-        spin_lock(&hba->device_lock);
-        dev->export_count++;
-        spin_unlock(&hba->device_lock);
-
-        core_export_port(dev, tpg, port, lun);
         return 0;
 }
-
-void core_dev_unexport(
-        struct se_device *dev,
-        struct se_portal_group *tpg,
-        struct se_lun *lun)
-{
-        struct se_hba *hba = dev->se_hba;
-        struct se_port *port = lun->lun_sep;
-
-        spin_lock(&lun->lun_sep_lock);
-        if (lun->lun_se_dev == NULL) {
-                spin_unlock(&lun->lun_sep_lock);
-                return;
-        }
-        spin_unlock(&lun->lun_sep_lock);
-
-        spin_lock(&dev->se_port_lock);
-        core_release_port(dev, port);
-        spin_unlock(&dev->se_port_lock);
-
-        spin_lock(&hba->device_lock);
-        dev->export_count--;
-        spin_unlock(&hba->device_lock);
-
-        lun->lun_sep = NULL;
-        lun->lun_se_dev = NULL;
-}
 
 static void se_release_vpd_for_dev(struct se_device *dev)
 {
         struct t10_vpd *vpd, *vpd_tmp;
@@ -783,10 +658,10 @@ int core_dev_add_initiator_node_lun_acl(
 }
 
 int core_dev_del_initiator_node_lun_acl(
-        struct se_portal_group *tpg,
         struct se_lun *lun,
         struct se_lun_acl *lacl)
 {
+        struct se_portal_group *tpg = lun->lun_tpg;
         struct se_node_acl *nacl;
         struct se_dev_entry *deve;
@@ -930,6 +805,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
         xcopy_lun->lun_se_dev = dev;
         spin_lock_init(&xcopy_lun->lun_sep_lock);
         init_completion(&xcopy_lun->lun_ref_comp);
+        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+        xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 
         return dev;
 }
...
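The removed core_alloc_port()/core_export_port()/core_release_port() and core_dev_export()/core_dev_unexport() paths are not replaced by new helpers in this file; their work moves into core_tpg_add_lun() and core_tpg_remove_lun() (see the hunks further down). Condensed from those hunks, with error handling omitted and a hypothetical helper name, the export side now amounts to roughly:

/* Condensed, illustrative sketch of the new export path (error handling omitted). */
static int example_export_lun(struct se_lun *lun, struct se_device *dev)
{
        int ret = core_alloc_rtpi(lun, dev);    /* was core_alloc_port() */
        if (ret)
                return ret;

        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        spin_lock(&dev->se_port_lock);
        rcu_assign_pointer(lun->lun_se_dev, dev);       /* was lun->lun_se_dev = dev */
        dev->export_count++;                            /* replaces dev->dev_port_count */
        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);
        return 0;
}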
@@ -91,12 +91,11 @@ static int target_fabric_mappedlun_link(
         /*
          * Ensure that the source port exists
          */
-        if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
-                pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
-                                "_tpg does not exist\n");
+        if (!lun->lun_se_dev) {
+                pr_err("Source se_lun->lun_se_dev does not exist\n");
                 return -EINVAL;
         }
-        se_tpg = lun->lun_sep->sep_tpg;
+        se_tpg = lun->lun_tpg;
 
         nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
         tpg_ci = &nacl_ci->ci_group->cg_item;
@@ -150,9 +149,8 @@ static int target_fabric_mappedlun_unlink(
                         struct se_lun_acl, se_lun_group);
         struct se_lun *lun = container_of(to_config_group(lun_ci),
                         struct se_lun, lun_group);
-        struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
 
-        return core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+        return core_dev_del_initiator_node_lun_acl(lun, lacl);
 }
 
 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
@@ -643,10 +641,10 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
         struct se_lun *lun,
         char *page)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
-        return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+        return core_alua_show_tg_pt_gp_info(lun, page);
 }
 
 static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
@@ -654,10 +652,10 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
         const char *page,
         size_t count)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
-        return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+        return core_alua_store_tg_pt_gp_info(lun, page, count);
 }
 
 TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
@@ -669,7 +667,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
         struct se_lun *lun,
         char *page)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_show_offline_bit(lun, page);
@@ -680,7 +678,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
         const char *page,
         size_t count)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_store_offline_bit(lun, page, count);
@@ -695,7 +693,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
         struct se_lun *lun,
         char *page)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_show_secondary_status(lun, page);
@@ -706,7 +704,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
         const char *page,
         size_t count)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_store_secondary_status(lun, page, count);
@@ -721,7 +719,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
         struct se_lun *lun,
         char *page)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_show_secondary_write_metadata(lun, page);
@@ -732,7 +730,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
         const char *page,
         size_t count)
 {
-        if (!lun || !lun->lun_sep)
+        if (!lun || !lun->lun_se_dev)
                 return -ENODEV;
 
         return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -811,7 +809,7 @@ static int target_fabric_port_unlink(
 {
         struct se_lun *lun = container_of(to_config_group(lun_ci),
                         struct se_lun, lun_group);
-        struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+        struct se_portal_group *se_tpg = lun->lun_tpg;
         struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 
         if (tf->tf_ops->fabric_pre_unlink) {
...
@@ -21,6 +21,7 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
 
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void target_pr_kref_release(struct kref *);
 void core_free_device_list_for_node(struct se_node_acl *,
@@ -32,10 +33,6 @@ int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
 void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
                 struct se_node_acl *, struct se_portal_group *);
 void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-int core_dev_export(struct se_device *, struct se_portal_group *,
-                struct se_lun *);
-void core_dev_unexport(struct se_device *, struct se_portal_group *,
-                struct se_lun *);
 int core_dev_add_lun(struct se_portal_group *, struct se_device *,
                 struct se_lun *lun);
 void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
@@ -43,8 +40,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *
                 struct se_node_acl *, u32, int *);
 int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
                 struct se_lun_acl *, struct se_lun *lun, u32);
-int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
-                struct se_lun *, struct se_lun_acl *);
+int core_dev_del_initiator_node_lun_acl(struct se_lun *,
                 struct se_lun_acl *);
 void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
                 struct se_lun_acl *lacl);
 int core_dev_setup_virtual_lun0(void);
@@ -120,4 +117,7 @@ void target_stat_setup_dev_default_groups(struct se_device *);
 void target_stat_setup_port_default_groups(struct se_lun *);
 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
 
+/* target_core_xcopy.c */
+extern struct se_portal_group xcopy_pt_tpg;
+
 #endif /* TARGET_CORE_INTERNAL_H */
@@ -642,7 +642,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
         pr_reg->pr_reg_deve = deve;
         pr_reg->pr_res_mapped_lun = mapped_lun;
         pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
-        pr_reg->tg_pt_sep_rtpi = lun->lun_sep->sep_rtpi;
+        pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
         pr_reg->pr_res_key = sa_res_key;
         pr_reg->pr_reg_all_tg_pt = all_tg_pt;
         pr_reg->pr_reg_aptpl = aptpl;
@@ -680,8 +680,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
         struct se_dev_entry *deve_tmp;
         struct se_node_acl *nacl_tmp;
         struct se_lun_acl *lacl_tmp;
-        struct se_lun *lun_tmp;
-        struct se_port *port, *port_tmp;
+        struct se_lun *lun_tmp, *next, *dest_lun;
         const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
         struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
         int ret;
@@ -704,13 +703,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
          * for ALL_TG_PT=1
          */
         spin_lock(&dev->se_port_lock);
-        list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
-                atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
+        list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
+                atomic_inc_mb(&lun_tmp->lun_active);
                 spin_unlock(&dev->se_port_lock);
 
-                spin_lock_bh(&port->sep_alua_lock);
-                list_for_each_entry(deve_tmp, &port->sep_alua_list,
-                                        alua_port_list) {
+                spin_lock_bh(&lun_tmp->lun_deve_lock);
+                list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
                         /*
                          * This pointer will be NULL for demo mode MappedLUNs
                          * that have not been make explicit via a ConfigFS
@@ -720,7 +718,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                 continue;
 
                         lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
-                                        lockdep_is_held(&port->sep_alua_lock));
+                                        lockdep_is_held(&lun_tmp->lun_deve_lock));
                         nacl_tmp = lacl_tmp->se_lun_nacl;
                         /*
                          * Skip the matching struct se_node_acl that is allocated
@@ -742,7 +740,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                                 continue;
 
                         kref_get(&deve_tmp->pr_kref);
-                        spin_unlock_bh(&port->sep_alua_lock);
+                        spin_unlock_bh(&lun_tmp->lun_deve_lock);
                         /*
                          * Grab a configfs group dependency that is released
                          * for the exception path at label out: below, or upon
@@ -753,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                         if (ret < 0) {
                                 pr_err("core_scsi3_lunacl_depend"
                                                 "_item() failed\n");
-                                atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+                                atomic_dec_mb(&lun->lun_active);
                                 kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
                                 goto out;
                         }
@@ -764,27 +762,27 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                          * the original *pr_reg is processed in
                          * __core_scsi3_add_registration()
                          */
-                        lun_tmp = rcu_dereference_check(deve_tmp->se_lun,
+                        dest_lun = rcu_dereference_check(deve_tmp->se_lun,
                                 atomic_read(&deve_tmp->pr_kref.refcount) != 0);
 
                         pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
-                                                nacl_tmp, lun_tmp, deve_tmp,
+                                                nacl_tmp, dest_lun, deve_tmp,
                                                 deve_tmp->mapped_lun, NULL,
                                                 sa_res_key, all_tg_pt, aptpl);
                         if (!pr_reg_atp) {
-                                atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+                                atomic_dec_mb(&lun_tmp->lun_active);
                                 core_scsi3_lunacl_undepend_item(deve_tmp);
                                 goto out;
                         }
 
                         list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
                                         &pr_reg->pr_reg_atp_list);
-                        spin_lock_bh(&port->sep_alua_lock);
+                        spin_lock_bh(&lun_tmp->lun_deve_lock);
                 }
-                spin_unlock_bh(&port->sep_alua_lock);
+                spin_unlock_bh(&lun_tmp->lun_deve_lock);
 
                 spin_lock(&dev->se_port_lock);
-                atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+                atomic_dec_mb(&lun_tmp->lun_active);
         }
         spin_unlock(&dev->se_port_lock);
@@ -938,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration(
                     (pr_reg->pr_aptpl_target_lun == target_lun)) {
 
                         pr_reg->pr_reg_nacl = nacl;
-                        pr_reg->tg_pt_sep_rtpi = lun->lun_sep->sep_rtpi;
+                        pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
 
                         list_del(&pr_reg->pr_reg_aptpl_list);
                         spin_unlock(&pr_tmpl->aptpl_reg_lock);
@@ -1465,7 +1463,6 @@ core_scsi3_decode_spec_i_port(
         int aptpl)
 {
         struct se_device *dev = cmd->se_dev;
-        struct se_port *tmp_port;
         struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
         struct se_session *se_sess = cmd->se_sess;
         struct se_node_acl *dest_node_acl = NULL;
@@ -1550,16 +1547,14 @@ core_scsi3_decode_spec_i_port(
         ptr = &buf[28];
 
         while (tpdl > 0) {
-                struct se_lun *dest_lun;
+                struct se_lun *dest_lun, *tmp_lun;
 
                 proto_ident = (ptr[0] & 0x0f);
                 dest_tpg = NULL;
 
                 spin_lock(&dev->se_port_lock);
-                list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
-                        tmp_tpg = tmp_port->sep_tpg;
-                        if (!tmp_tpg)
-                                continue;
+                list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+                        tmp_tpg = tmp_lun->lun_tpg;
                         /*
                          * Look for the matching proto_ident provided by
@@ -1567,7 +1562,7 @@ core_scsi3_decode_spec_i_port(
                          */
                         if (tmp_tpg->proto_id != proto_ident)
                                 continue;
-                        dest_rtpi = tmp_port->sep_rtpi;
+                        dest_rtpi = tmp_lun->lun_rtpi;
 
                         i_str = target_parse_pr_out_transport_id(tmp_tpg,
                                         (const char *)ptr, &tid_len, &iport_ptr);
@@ -3119,9 +3114,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
         struct se_session *se_sess = cmd->se_sess;
         struct se_device *dev = cmd->se_dev;
         struct se_dev_entry *dest_se_deve = NULL;
-        struct se_lun *se_lun = cmd->se_lun;
+        struct se_lun *se_lun = cmd->se_lun, *tmp_lun;
         struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
-        struct se_port *se_port;
         struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
         const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
         struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
@@ -3206,12 +3200,10 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
         }
 
         spin_lock(&dev->se_port_lock);
-        list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
-                if (se_port->sep_rtpi != rtpi)
-                        continue;
-                dest_se_tpg = se_port->sep_tpg;
-                if (!dest_se_tpg)
+        list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+                if (tmp_lun->lun_rtpi != rtpi)
                         continue;
+                dest_se_tpg = tmp_lun->lun_tpg;
                 dest_tf_ops = dest_se_tpg->se_tpg_tfo;
                 if (!dest_tf_ops)
                         continue;
...
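In the PR hunks above, the per-port reference count sep_tg_pt_ref_cnt becomes the per-LUN lun->lun_active counter: the ALL_TG_PT=1 walk pins each exported LUN before dropping dev->se_port_lock, and core_tpg_remove_lun() later spins until that count drains before tearing the export down. A minimal sketch of the pin/unpin pattern (the function name is hypothetical):

/* Illustrative pin/unpin pattern around per-LUN work while walking dev_sep_list. */
static void example_walk_exported_luns(struct se_device *dev)
{
        struct se_lun *lun, *next;

        spin_lock(&dev->se_port_lock);
        list_for_each_entry_safe(lun, next, &dev->dev_sep_list, lun_dev_link) {
                atomic_inc_mb(&lun->lun_active);        /* pin this export */
                spin_unlock(&dev->se_port_lock);

                /* ... per-LUN work that may take other locks goes here ... */

                spin_lock(&dev->se_port_lock);
                atomic_dec_mb(&lun->lun_active);        /* unpin */
        }
        spin_unlock(&dev->se_port_lock);
}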
@@ -37,10 +37,9 @@
 #include "target_core_ua.h"
 #include "target_core_xcopy.h"
 
-static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
+static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
 {
         struct t10_alua_tg_pt_gp *tg_pt_gp;
-        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
         /*
          * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
@@ -53,17 +52,11 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
          *
          * See spc4r17 section 6.4.2 Table 135
          */
-        if (!port)
-                return;
-
-        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-        if (!tg_pt_gp_mem)
-                return;
-
-        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+        spin_lock(&lun->lun_tg_pt_gp_lock);
+        tg_pt_gp = lun->lun_tg_pt_gp;
         if (tg_pt_gp)
                 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
-        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+        spin_unlock(&lun->lun_tg_pt_gp_lock);
 }
 
 sense_reason_t
@@ -94,7 +87,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Enable SCCS and TPGS fields for Emulated ALUA
          */
-        spc_fill_alua_data(lun->lun_sep, buf);
+        spc_fill_alua_data(lun, buf);
 
         /*
          * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
@@ -181,11 +174,9 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
         struct se_device *dev = cmd->se_dev;
         struct se_lun *lun = cmd->se_lun;
-        struct se_port *port = NULL;
         struct se_portal_group *tpg = NULL;
         struct t10_alua_lu_gp_member *lu_gp_mem;
         struct t10_alua_tg_pt_gp *tg_pt_gp;
-        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
         unsigned char *prod = &dev->t10_wwn.model[0];
         u32 prod_len;
         u32 unit_serial_len, off = 0;
@@ -267,18 +258,15 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
         /* Header size for Designation descriptor */
         len += (id_len + 4);
         off += (id_len + 4);
-        /*
-         * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
-         */
-        port = lun->lun_sep;
-        if (port) {
+        if (1) {
                 struct t10_alua_lu_gp *lu_gp;
                 u32 padding, scsi_name_len, scsi_target_len;
                 u16 lu_gp_id = 0;
                 u16 tg_pt_gp_id = 0;
                 u16 tpgt;
 
-                tpg = port->sep_tpg;
+                tpg = lun->lun_tpg;
                 /*
                  * Relative target port identifer, see spc4r17
                  * section 7.7.3.7
@@ -298,8 +286,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
                 /* Skip over Obsolete field in RTPI payload
                  * in Table 472 */
                 off += 2;
-                buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
-                buf[off++] = (port->sep_rtpi & 0xff);
+                buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+                buf[off++] = (lun->lun_rtpi & 0xff);
                 len += 8; /* Header size + Designation descriptor */
                 /*
                  * Target port group identifier, see spc4r17
@@ -308,18 +296,14 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
                  * Get the PROTOCOL IDENTIFIER as defined by spc4r17
                  * section 7.5.1 Table 362
                  */
-                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-                if (!tg_pt_gp_mem)
-                        goto check_lu_gp;
-
-                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-                tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+                spin_lock(&lun->lun_tg_pt_gp_lock);
+                tg_pt_gp = lun->lun_tg_pt_gp;
                 if (!tg_pt_gp) {
-                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                        spin_unlock(&lun->lun_tg_pt_gp_lock);
                         goto check_lu_gp;
                 }
                 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                spin_unlock(&lun->lun_tg_pt_gp_lock);
 
                 buf[off] = tpg->proto_id << 4;
                 buf[off++] |= 0x1; /* CODE SET == Binary */
@@ -694,7 +678,7 @@ static sense_reason_t
 spc_emulate_inquiry(struct se_cmd *cmd)
 {
         struct se_device *dev = cmd->se_dev;
-        struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+        struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
         unsigned char *rbuf;
         unsigned char *cdb = cmd->t_task_cdb;
         unsigned char *buf;
@@ -708,7 +692,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
         }
 
-        if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+        if (dev == tpg->tpg_virt_lun0->lun_se_dev)
                 buf[0] = 0x3f; /* Not connected */
         else
                 buf[0] = dev->transport->get_device_type(dev);
...
[diff for one file is collapsed and not shown]
@@ -40,6 +40,7 @@
 #include <target/target_core_fabric.h>
 
 #include "target_core_internal.h"
+#include "target_core_alua.h"
 #include "target_core_pr.h"
 
 extern struct se_device *g_lun0_dev;
@@ -484,32 +485,14 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
         complete(&lun->lun_ref_comp);
 }
 
-static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
-{
-        /* Set in core_dev_setup_virtual_lun0() */
-        struct se_device *dev = g_lun0_dev;
-        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
-        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-        int ret;
-
-        lun->unpacked_lun = 0;
-        atomic_set(&lun->lun_acl_count, 0);
-        spin_lock_init(&lun->lun_sep_lock);
-        init_completion(&lun->lun_ref_comp);
-
-        ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
-        if (ret < 0)
-                return ret;
-
-        return 0;
-}
-
 int core_tpg_register(
         const struct target_core_fabric_ops *tfo,
         struct se_wwn *se_wwn,
         struct se_portal_group *se_tpg,
         int proto_id)
 {
+        int ret;
+
         INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
         se_tpg->proto_id = proto_id;
         se_tpg->se_tpg_tfo = tfo;
@@ -523,8 +506,16 @@ int core_tpg_register(
         mutex_init(&se_tpg->acl_node_mutex);
 
         if (se_tpg->proto_id >= 0) {
-                if (core_tpg_setup_virtual_lun0(se_tpg) < 0)
-                        return -ENOMEM;
+                se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+                if (IS_ERR(se_tpg->tpg_virt_lun0))
+                        return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+                ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+                                TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+                if (ret < 0) {
+                        kfree(se_tpg->tpg_virt_lun0);
+                        return ret;
+                }
         }
 
         spin_lock_bh(&tpg_lock);
@@ -575,8 +566,10 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
                 kfree(nacl);
         }
 
-        if (se_tpg->proto_id >= 0)
-                core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
+        if (se_tpg->proto_id >= 0) {
+                core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+                kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+        }
 
         return 0;
 }
@@ -607,6 +600,15 @@ struct se_lun *core_tpg_alloc_lun(
         atomic_set(&lun->lun_acl_count, 0);
         spin_lock_init(&lun->lun_sep_lock);
         init_completion(&lun->lun_ref_comp);
+        INIT_LIST_HEAD(&lun->lun_deve_list);
+        INIT_LIST_HEAD(&lun->lun_dev_link);
+        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+        spin_lock_init(&lun->lun_deve_lock);
+        mutex_init(&lun->lun_tg_pt_md_mutex);
+        INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+        spin_lock_init(&lun->lun_tg_pt_gp_lock);
+        atomic_set(&lun->lun_active, 0);
+        lun->lun_tpg = tpg;
 
         return lun;
 }
@@ -622,21 +624,40 @@ int core_tpg_add_lun(
         ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                               GFP_KERNEL);
         if (ret < 0)
-                return ret;
+                goto out;
 
-        ret = core_dev_export(dev, tpg, lun);
-        if (ret < 0) {
-                percpu_ref_exit(&lun->lun_ref);
-                return ret;
-        }
+        ret = core_alloc_rtpi(lun, dev);
+        if (ret)
+                goto out_kill_ref;
+
+        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 
         mutex_lock(&tpg->tpg_lun_mutex);
-        spin_lock(&lun->lun_sep_lock);
-        lun->lun_index = dev->dev_index;
-        lun->lun_se_dev = dev;
-        spin_unlock(&lun->lun_sep_lock);
+
+        spin_lock(&dev->se_port_lock);
+        rcu_assign_pointer(lun->lun_se_dev, dev);
+        dev->export_count++;
+        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+        spin_unlock(&dev->se_port_lock);
+
         lun->lun_access = lun_access;
         if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
         mutex_unlock(&tpg->tpg_lun_mutex);
 
         return 0;
+
+out_kill_ref:
+        percpu_ref_exit(&lun->lun_ref);
+out:
+        return ret;
 }
 
 void core_tpg_remove_lun(
@@ -648,9 +669,19 @@ void core_tpg_remove_lun(
         core_clear_lun_from_tpg(lun, tpg);
         transport_clear_lun_ref(lun);
 
-        core_dev_unexport(lun->lun_se_dev, tpg, lun);
-
         mutex_lock(&tpg->tpg_lun_mutex);
+        if (lun->lun_se_dev) {
+                while (atomic_read(&lun->lun_active))
+                        cpu_relax();
+
+                target_detach_tg_pt_gp(lun);
+
+                spin_lock(&dev->se_port_lock);
+                list_del(&lun->lun_dev_link);
+                dev->export_count--;
+                rcu_assign_pointer(lun->lun_se_dev, NULL);
+                spin_unlock(&dev->se_port_lock);
+        }
+
         if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                 hlist_del_rcu(&lun->link);
         mutex_unlock(&tpg->tpg_lun_mutex);
...
@@ -60,7 +60,6 @@ struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
-struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
@@ -119,16 +118,6 @@ int init_se_kmem_caches(void)
                                 "cache failed\n");
                 goto out_free_lu_gp_mem_cache;
         }
-        t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
-                        "t10_alua_tg_pt_gp_mem_cache",
-                        sizeof(struct t10_alua_tg_pt_gp_member),
-                        __alignof__(struct t10_alua_tg_pt_gp_member),
-                        0, NULL);
-        if (!t10_alua_tg_pt_gp_mem_cache) {
-                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
-                                "mem_t failed\n");
-                goto out_free_tg_pt_gp_cache;
-        }
         t10_alua_lba_map_cache = kmem_cache_create(
                         "t10_alua_lba_map_cache",
                         sizeof(struct t10_alua_lba_map),
@@ -136,7 +125,7 @@ int init_se_kmem_caches(void)
         if (!t10_alua_lba_map_cache) {
                 pr_err("kmem_cache_create() for t10_alua_lba_map_"
                                 "cache failed\n");
-                goto out_free_tg_pt_gp_mem_cache;
+                goto out_free_tg_pt_gp_cache;
         }
         t10_alua_lba_map_mem_cache = kmem_cache_create(
                         "t10_alua_lba_map_mem_cache",
@@ -159,8 +148,6 @@ int init_se_kmem_caches(void)
         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
         kmem_cache_destroy(t10_alua_lba_map_cache);
-out_free_tg_pt_gp_mem_cache:
-        kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 out_free_lu_gp_mem_cache:
@@ -186,7 +173,6 @@ void release_se_kmem_caches(void)
         kmem_cache_destroy(t10_alua_lu_gp_cache);
         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
-        kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
         kmem_cache_destroy(t10_alua_lba_map_cache);
         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }
@@ -1277,8 +1263,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
         cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
 
         spin_lock(&cmd->se_lun->lun_sep_lock);
-        if (cmd->se_lun->lun_sep)
-                cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+        cmd->se_lun->lun_stats.cmd_pdus++;
         spin_unlock(&cmd->se_lun->lun_sep_lock);
         return 0;
 }
@@ -2076,10 +2061,7 @@ static void target_complete_ok_work(struct work_struct *work)
         switch (cmd->data_direction) {
         case DMA_FROM_DEVICE:
                 spin_lock(&cmd->se_lun->lun_sep_lock);
-                if (cmd->se_lun->lun_sep) {
-                        cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
-                                        cmd->data_length;
-                }
+                cmd->se_lun->lun_stats.tx_data_octets += cmd->data_length;
                 spin_unlock(&cmd->se_lun->lun_sep_lock);
                 /*
                  * Perform READ_STRIP of PI using software emulation when
@@ -2104,20 +2086,14 @@ static void target_complete_ok_work(struct work_struct *work)
                 break;
         case DMA_TO_DEVICE:
                 spin_lock(&cmd->se_lun->lun_sep_lock);
-                if (cmd->se_lun->lun_sep) {
-                        cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
-                                        cmd->data_length;
-                }
+                cmd->se_lun->lun_stats.rx_data_octets += cmd->data_length;
                 spin_unlock(&cmd->se_lun->lun_sep_lock);
                 /*
                  * Check if we need to send READ payload for BIDI-COMMAND
                  */
                 if (cmd->se_cmd_flags & SCF_BIDI) {
                         spin_lock(&cmd->se_lun->lun_sep_lock);
-                        if (cmd->se_lun->lun_sep) {
-                                cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
-                                                cmd->data_length;
-                        }
+                        cmd->se_lun->lun_stats.tx_data_octets += cmd->data_length;
                         spin_unlock(&cmd->se_lun->lun_sep_lock);
                         ret = cmd->se_tfo->queue_data_in(cmd);
                         if (ret == -EAGAIN || ret == -ENOMEM)
...
@@ -348,8 +348,7 @@ struct xcopy_pt_cmd {
         unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
 
-static struct se_port xcopy_pt_port;
-static struct se_portal_group xcopy_pt_tpg;
+struct se_portal_group xcopy_pt_tpg;
 static struct se_session xcopy_pt_sess;
 static struct se_node_acl xcopy_pt_nacl;
@@ -439,17 +438,11 @@ int target_xcopy_setup_pt(void)
                 return -ENOMEM;
         }
 
-        memset(&xcopy_pt_port, 0, sizeof(struct se_port));
-        INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
-        INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
-        mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
-
         memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
         INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
         INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
         INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
 
-        xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
         xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
 
         memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
@@ -490,10 +483,6 @@ static void target_xcopy_setup_pt_port(
          */
         if (remote_port) {
                 xpt_cmd->remote_port = remote_port;
-                pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
-                pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
-                        " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
-                        pt_cmd->se_lun->lun_sep);
         } else {
                 pt_cmd->se_lun = ec_cmd->se_lun;
                 pt_cmd->se_dev = ec_cmd->se_dev;
@@ -513,10 +502,6 @@ static void target_xcopy_setup_pt_port(
          */
         if (remote_port) {
                 xpt_cmd->remote_port = remote_port;
-                pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
-                pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
-                        " cmd->se_lun->lun_sep for X-COPY data PULL\n",
-                        pt_cmd->se_lun->lun_sep);
         } else {
                 pt_cmd->se_lun = ec_cmd->se_lun;
                 pt_cmd->se_dev = ec_cmd->se_dev;
...
@@ -304,22 +304,13 @@ struct t10_alua_tg_pt_gp {
         struct se_device *tg_pt_gp_dev;
         struct config_group tg_pt_gp_group;
         struct list_head tg_pt_gp_list;
-        struct list_head tg_pt_gp_mem_list;
-        struct se_port *tg_pt_gp_alua_port;
+        struct list_head tg_pt_gp_lun_list;
+        struct se_lun *tg_pt_gp_alua_lun;
         struct se_node_acl *tg_pt_gp_alua_nacl;
         struct delayed_work tg_pt_gp_transition_work;
         struct completion *tg_pt_gp_transition_complete;
 };
 
-struct t10_alua_tg_pt_gp_member {
-        bool tg_pt_gp_assoc;
-        atomic_t tg_pt_gp_mem_ref_cnt;
-        spinlock_t tg_pt_gp_mem_lock;
-        struct t10_alua_tg_pt_gp *tg_pt_gp;
-        struct se_port *tg_pt;
-        struct list_head tg_pt_gp_mem_list;
-};
-
 struct t10_vpd {
         unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
         int protocol_identifier_set;
@@ -650,6 +641,7 @@ struct se_dev_entry {
 #define DEF_PR_REG_ACTIVE               1
         unsigned long deve_flags;
         struct list_head alua_port_list;
+        struct list_head lun_link;
         struct list_head ua_list;
         struct hlist_node link;
         struct rcu_head rcu_head;
@@ -697,7 +689,14 @@ struct se_port_stat_grps {
         struct config_group scsi_transport_group;
 };
 
+struct scsi_port_stats {
+        u32 cmd_pdus;
+        u64 tx_data_octets;
+        u64 rx_data_octets;
+};
+
 struct se_lun {
+        /* RELATIVE TARGET PORT IDENTIFER */
         u16 lun_rtpi;
 #define SE_LUN_LINK_MAGIC               0xffff7771
         u32 lun_link_magic;
@@ -707,12 +706,30 @@ struct se_lun {
         u32 lun_index;
         atomic_t lun_acl_count;
         spinlock_t lun_sep_lock;
-        struct se_device *lun_se_dev;
-        struct se_port *lun_sep;
+        struct se_device __rcu *lun_se_dev;
+        struct list_head lun_deve_list;
+        spinlock_t lun_deve_lock;
+        /* ALUA state */
+        int lun_tg_pt_secondary_stat;
+        int lun_tg_pt_secondary_write_md;
+        atomic_t lun_tg_pt_secondary_offline;
+        struct mutex lun_tg_pt_md_mutex;
+        /* ALUA target port group linkage */
+        struct list_head lun_tg_pt_gp_link;
+        struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+        spinlock_t lun_tg_pt_gp_lock;
+        atomic_t lun_active;
+        struct se_portal_group *lun_tpg;
+        struct scsi_port_stats lun_stats;
         struct config_group lun_group;
         struct se_port_stat_grps port_stat_grps;
         struct completion lun_ref_comp;
         struct percpu_ref lun_ref;
+        struct list_head lun_dev_link;
         struct hlist_node link;
         struct rcu_head rcu_head;
 };
@@ -737,7 +754,6 @@ struct se_device {
 #define DF_EMULATED_VPD_UNIT_SERIAL     0x00000004
 #define DF_USING_UDEV_PATH              0x00000008
 #define DF_USING_ALIAS                  0x00000010
-        u32 dev_port_count;
         /* Physical device queue depth */
         u32 queue_depth;
         /* Used for SPC-2 reservations enforce of ISIDs */
@@ -754,7 +770,7 @@ struct se_device {
         atomic_t dev_ordered_id;
         atomic_t dev_ordered_sync;
         atomic_t dev_qf_count;
-        int export_count;
+        u32 export_count;
         spinlock_t delayed_cmd_lock;
         spinlock_t execute_task_lock;
         spinlock_t dev_reservation_lock;
@@ -821,32 +837,6 @@ struct se_hba {
         struct target_backend *backend;
 };
 
-struct scsi_port_stats {
-        u64 cmd_pdus;
-        u64 tx_data_octets;
-        u64 rx_data_octets;
-};
-
-struct se_port {
-        /* RELATIVE TARGET PORT IDENTIFER */
-        u16 sep_rtpi;
-        int sep_tg_pt_secondary_stat;
-        int sep_tg_pt_secondary_write_md;
-        u32 sep_index;
-        struct scsi_port_stats sep_stats;
-        /* Used for ALUA Target Port Groups membership */
-        atomic_t sep_tg_pt_secondary_offline;
-        /* Used for PR ALL_TG_PT=1 */
-        atomic_t sep_tg_pt_ref_cnt;
-        spinlock_t sep_alua_lock;
-        struct mutex sep_tg_pt_md_mutex;
-        struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
-        struct se_lun *sep_lun;
-        struct se_portal_group *sep_tpg;
-        struct list_head sep_alua_list;
-        struct list_head sep_list;
-};
-
 struct se_tpg_np {
         struct se_portal_group *tpg_np_parent;
         struct config_group tpg_np_group;
@@ -872,7 +862,7 @@ struct se_portal_group {
         /* linked list for initiator ACL list */
         struct list_head acl_node_list;
         struct hlist_head tpg_lun_hlist;
-        struct se_lun tpg_virt_lun0;
+        struct se_lun *tpg_virt_lun0;
         /* List of TCM sessions associated wth this TPG */
         struct list_head tpg_sess_list;
         /* Pointer to $FABRIC_MOD dependent code */
...