Commit 33940d09 authored by Joern Engel, committed by Nicholas Bellinger

target: encapsulate smp_mb__after_atomic()

The target code has a rather generous helping of smp_mb__after_atomic()
throughout the code base.  Most atomic operations were followed by one
and none were preceded by smp_mb__before_atomic(), nor accompanied by a
comment explaining the need for a barrier.

Instead of trying to prove for every case whether or not it is needed,
this patch introduces atomic_inc_mb() and atomic_dec_mb(), which
explicitly include the memory barriers before and after the atomic
operation.  For now they are defined in a target header, although they
could be of general use.

Most of the existing atomic/mb combinations were replaced by the new
helpers.  In a few cases the atomic was sandwiched in
spin_lock/spin_unlock and I simply removed the barrier.

I suspect that in most cases the correct conversion would have been to
drop the barrier.  I also suspect that a few cases exist where a) the
barrier was necessary and b) a second barrier before the atomic would
have been necessary and got added by this patch.
Signed-off-by: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 74ed7e62
...@@ -960,8 +960,7 @@ static int tcm_loop_port_link( ...@@ -960,8 +960,7 @@ static int tcm_loop_port_link(
struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
atomic_inc(&tl_tpg->tl_tpg_port_count); atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic();
/* /*
* Add Linux/SCSI struct scsi_device by HCTL * Add Linux/SCSI struct scsi_device by HCTL
*/ */
...@@ -995,8 +994,7 @@ static void tcm_loop_port_unlink( ...@@ -995,8 +994,7 @@ static void tcm_loop_port_unlink(
scsi_remove_device(sd); scsi_remove_device(sd);
scsi_device_put(sd); scsi_device_put(sd);
atomic_dec(&tl_tpg->tl_tpg_port_count); atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic();
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
} }
......
...@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) ...@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
continue; continue;
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
...@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) ...@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
found = true; found = true;
spin_lock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
break; break;
} }
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
...@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) ...@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
* every I_T nexus other than the I_T nexus on which the SET * every I_T nexus other than the I_T nexus on which the SET
* TARGET PORT GROUPS command * TARGET PORT GROUPS command
*/ */
atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
spin_lock_bh(&port->sep_alua_lock); spin_lock_bh(&port->sep_alua_lock);
...@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) ...@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
spin_unlock_bh(&port->sep_alua_lock); spin_unlock_bh(&port->sep_alua_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock);
atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
smp_mb__after_atomic();
} }
spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/* /*
...@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) ...@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
spin_lock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (tg_pt_gp->tg_pt_gp_transition_complete) if (tg_pt_gp->tg_pt_gp_transition_complete)
...@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt( ...@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt(
*/ */
spin_lock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
...@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition( ...@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition(
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp; lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt); atomic_inc(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/* /*
* For storage objects that are members of the 'default_lu_gp', * For storage objects that are members of the 'default_lu_gp',
...@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition( ...@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition(
l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
new_state, explicit); new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt); atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic();
return rc; return rc;
} }
/* /*
...@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition( ...@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition(
lu_gp_mem_list) { lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev; dev = lu_gp_mem->lu_gp_mem_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&lu_gp->lu_gp_lock); spin_unlock(&lu_gp->lu_gp_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
...@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition( ...@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition(
tg_pt_gp->tg_pt_gp_alua_port = NULL; tg_pt_gp->tg_pt_gp_alua_port = NULL;
tg_pt_gp->tg_pt_gp_alua_nacl = NULL; tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
} }
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/* /*
* core_alua_do_transition_tg_pt() will always return * core_alua_do_transition_tg_pt() will always return
...@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition( ...@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition(
new_state, explicit); new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic();
if (rc) if (rc)
break; break;
} }
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock); spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic();
} }
spin_unlock(&lu_gp->lu_gp_lock); spin_unlock(&lu_gp->lu_gp_lock);
...@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition( ...@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition(
core_alua_dump_state(new_state)); core_alua_dump_state(new_state));
} }
atomic_dec(&lu_gp->lu_gp_ref_cnt); atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic();
return rc; return rc;
} }
......
...@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( ...@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
if (port->sep_rtpi != rtpi) if (port->sep_rtpi != rtpi)
continue; continue;
atomic_inc(&deve->pr_ref_count); atomic_inc_mb(&deve->pr_ref_count);
smp_mb__after_atomic();
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
return deve; return deve;
...@@ -1388,8 +1387,7 @@ int core_dev_add_initiator_node_lun_acl( ...@@ -1388,8 +1387,7 @@ int core_dev_add_initiator_node_lun_acl(
spin_lock(&lun->lun_acl_lock); spin_lock(&lun->lun_acl_lock);
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
atomic_inc(&lun->lun_acl_count); atomic_inc_mb(&lun->lun_acl_count);
smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock); spin_unlock(&lun->lun_acl_lock);
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
...@@ -1422,8 +1420,7 @@ int core_dev_del_initiator_node_lun_acl( ...@@ -1422,8 +1420,7 @@ int core_dev_del_initiator_node_lun_acl(
spin_lock(&lun->lun_acl_lock); spin_lock(&lun->lun_acl_lock);
list_del(&lacl->lacl_list); list_del(&lacl->lacl_list);
atomic_dec(&lun->lun_acl_count); atomic_dec_mb(&lun->lun_acl_count);
smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock); spin_unlock(&lun->lun_acl_lock);
core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
......
...@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( ...@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/ */
spin_lock(&dev->se_port_lock); spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
atomic_inc(&port->sep_tg_pt_ref_cnt); atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock); spin_unlock(&dev->se_port_lock);
spin_lock_bh(&port->sep_alua_lock); spin_lock_bh(&port->sep_alua_lock);
...@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( ...@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
continue; continue;
atomic_inc(&deve_tmp->pr_ref_count); atomic_inc_mb(&deve_tmp->pr_ref_count);
smp_mb__after_atomic();
spin_unlock_bh(&port->sep_alua_lock); spin_unlock_bh(&port->sep_alua_lock);
/* /*
* Grab a configfs group dependency that is released * Grab a configfs group dependency that is released
...@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( ...@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (ret < 0) { if (ret < 0) {
pr_err("core_scsi3_lunacl_depend" pr_err("core_scsi3_lunacl_depend"
"_item() failed\n"); "_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt); atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic(); atomic_dec_mb(&deve_tmp->pr_ref_count);
atomic_dec(&deve_tmp->pr_ref_count);
smp_mb__after_atomic();
goto out; goto out;
} }
/* /*
...@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( ...@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
nacl_tmp, deve_tmp, NULL, nacl_tmp, deve_tmp, NULL,
sa_res_key, all_tg_pt, aptpl); sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) { if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt); atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic(); atomic_dec_mb(&deve_tmp->pr_ref_count);
atomic_dec(&deve_tmp->pr_ref_count);
smp_mb__after_atomic();
core_scsi3_lunacl_undepend_item(deve_tmp); core_scsi3_lunacl_undepend_item(deve_tmp);
goto out; goto out;
} }
...@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( ...@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
spin_unlock_bh(&port->sep_alua_lock); spin_unlock_bh(&port->sep_alua_lock);
spin_lock(&dev->se_port_lock); spin_lock(&dev->se_port_lock);
atomic_dec(&port->sep_tg_pt_ref_cnt); atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic();
} }
spin_unlock(&dev->se_port_lock); spin_unlock(&dev->se_port_lock);
...@@ -1109,8 +1102,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( ...@@ -1109,8 +1102,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
if (dev->dev_attrib.enforce_pr_isids) if (dev->dev_attrib.enforce_pr_isids)
continue; continue;
} }
atomic_inc(&pr_reg->pr_res_holders); atomic_inc_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock); spin_unlock(&pr_tmpl->registration_lock);
return pr_reg; return pr_reg;
} }
...@@ -1124,8 +1116,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( ...@@ -1124,8 +1116,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
if (strcmp(isid, pr_reg->pr_reg_isid)) if (strcmp(isid, pr_reg->pr_reg_isid))
continue; continue;
atomic_inc(&pr_reg->pr_res_holders); atomic_inc_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock); spin_unlock(&pr_tmpl->registration_lock);
return pr_reg; return pr_reg;
} }
...@@ -1154,8 +1145,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( ...@@ -1154,8 +1145,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
{ {
atomic_dec(&pr_reg->pr_res_holders); atomic_dec_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
} }
static int core_scsi3_check_implicit_release( static int core_scsi3_check_implicit_release(
...@@ -1348,8 +1338,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) ...@@ -1348,8 +1338,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item); &tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count); atomic_dec_mb(&tpg->tpg_pr_ref_count);
smp_mb__after_atomic();
} }
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
...@@ -1368,16 +1357,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) ...@@ -1368,16 +1357,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
struct se_portal_group *tpg = nacl->se_tpg; struct se_portal_group *tpg = nacl->se_tpg;
if (nacl->dynamic_node_acl) { if (nacl->dynamic_node_acl) {
atomic_dec(&nacl->acl_pr_ref_count); atomic_dec_mb(&nacl->acl_pr_ref_count);
smp_mb__after_atomic();
return; return;
} }
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item); &nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count); atomic_dec_mb(&nacl->acl_pr_ref_count);
smp_mb__after_atomic();
} }
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
...@@ -1407,8 +1394,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) ...@@ -1407,8 +1394,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
* For nacl->dynamic_node_acl=1 * For nacl->dynamic_node_acl=1
*/ */
if (!lun_acl) { if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count); atomic_dec_mb(&se_deve->pr_ref_count);
smp_mb__after_atomic();
return; return;
} }
nacl = lun_acl->se_lun_nacl; nacl = lun_acl->se_lun_nacl;
...@@ -1417,8 +1403,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) ...@@ -1417,8 +1403,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item); &lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count); atomic_dec_mb(&se_deve->pr_ref_count);
smp_mb__after_atomic();
} }
static sense_reason_t static sense_reason_t
...@@ -1551,15 +1536,13 @@ core_scsi3_decode_spec_i_port( ...@@ -1551,15 +1536,13 @@ core_scsi3_decode_spec_i_port(
if (!i_str) if (!i_str)
continue; continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count); atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock); spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(tmp_tpg)) { if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()" pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n"); " for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count); atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic();
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap; goto out_unmap;
} }
...@@ -1571,10 +1554,8 @@ core_scsi3_decode_spec_i_port( ...@@ -1571,10 +1554,8 @@ core_scsi3_decode_spec_i_port(
spin_lock_irq(&tmp_tpg->acl_node_lock); spin_lock_irq(&tmp_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl( dest_node_acl = __core_tpg_get_initiator_node_acl(
tmp_tpg, i_str); tmp_tpg, i_str);
if (dest_node_acl) { if (dest_node_acl)
atomic_inc(&dest_node_acl->acl_pr_ref_count); atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic();
}
spin_unlock_irq(&tmp_tpg->acl_node_lock); spin_unlock_irq(&tmp_tpg->acl_node_lock);
if (!dest_node_acl) { if (!dest_node_acl) {
...@@ -1586,8 +1567,7 @@ core_scsi3_decode_spec_i_port( ...@@ -1586,8 +1567,7 @@ core_scsi3_decode_spec_i_port(
if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("configfs_depend_item() failed" pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n"); " for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count); atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic();
core_scsi3_tpg_undepend_item(tmp_tpg); core_scsi3_tpg_undepend_item(tmp_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap; goto out_unmap;
...@@ -1646,8 +1626,7 @@ core_scsi3_decode_spec_i_port( ...@@ -1646,8 +1626,7 @@ core_scsi3_decode_spec_i_port(
if (core_scsi3_lunacl_depend_item(dest_se_deve)) { if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()" pr_err("core_scsi3_lunacl_depend_item()"
" failed\n"); " failed\n");
atomic_dec(&dest_se_deve->pr_ref_count); atomic_dec_mb(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic();
core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg); core_scsi3_tpg_undepend_item(dest_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
...@@ -3167,15 +3146,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ...@@ -3167,15 +3146,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
if (!dest_tf_ops) if (!dest_tf_ops)
continue; continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count); atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock); spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(dest_se_tpg)) { if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed" pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n"); " for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count); atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic();
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_put_pr_reg; goto out_put_pr_reg;
} }
...@@ -3271,10 +3248,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ...@@ -3271,10 +3248,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
spin_lock_irq(&dest_se_tpg->acl_node_lock); spin_lock_irq(&dest_se_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
initiator_str); initiator_str);
if (dest_node_acl) { if (dest_node_acl)
atomic_inc(&dest_node_acl->acl_pr_ref_count); atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic();
}
spin_unlock_irq(&dest_se_tpg->acl_node_lock); spin_unlock_irq(&dest_se_tpg->acl_node_lock);
if (!dest_node_acl) { if (!dest_node_acl) {
...@@ -3288,8 +3263,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ...@@ -3288,8 +3263,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("core_scsi3_nodeacl_depend_item() for" pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n"); " dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count); atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic();
dest_node_acl = NULL; dest_node_acl = NULL;
ret = TCM_INVALID_PARAMETER_LIST; ret = TCM_INVALID_PARAMETER_LIST;
goto out; goto out;
...@@ -3313,8 +3287,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, ...@@ -3313,8 +3287,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
if (core_scsi3_lunacl_depend_item(dest_se_deve)) { if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n"); pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count); atomic_dec_mb(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic();
dest_se_deve = NULL; dest_se_deve = NULL;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out; goto out;
...@@ -3879,8 +3852,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) ...@@ -3879,8 +3852,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
se_tpg = pr_reg->pr_reg_nacl->se_tpg; se_tpg = pr_reg->pr_reg_nacl->se_tpg;
add_desc_len = 0; add_desc_len = 0;
atomic_inc(&pr_reg->pr_res_holders); atomic_inc_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock); spin_unlock(&pr_tmpl->registration_lock);
/* /*
* Determine expected length of $FABRIC_MOD specific * Determine expected length of $FABRIC_MOD specific
...@@ -3893,8 +3865,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) ...@@ -3893,8 +3865,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length); " out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock); spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders); atomic_dec_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
break; break;
} }
/* /*
...@@ -3955,8 +3926,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) ...@@ -3955,8 +3926,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
se_nacl, pr_reg, &format_code, &buf[off+4]); se_nacl, pr_reg, &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock); spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders); atomic_dec_mb(&pr_reg->pr_res_holders);
smp_mb__after_atomic();
/* /*
* Set the ADDITIONAL DESCRIPTOR LENGTH * Set the ADDITIONAL DESCRIPTOR LENGTH
*/ */
......
...@@ -752,8 +752,7 @@ void target_qf_do_work(struct work_struct *work) ...@@ -752,8 +752,7 @@ void target_qf_do_work(struct work_struct *work)
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node); list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count); atomic_dec_mb(&dev->dev_qf_count);
smp_mb__after_atomic();
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
...@@ -1721,8 +1720,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) ...@@ -1721,8 +1720,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
cmd->t_task_cdb[0], cmd->se_ordered_id); cmd->t_task_cdb[0], cmd->se_ordered_id);
return false; return false;
case MSG_ORDERED_TAG: case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync); atomic_inc_mb(&dev->dev_ordered_sync);
smp_mb__after_atomic();
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
" se_ordered_id: %u\n", " se_ordered_id: %u\n",
...@@ -1739,8 +1737,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) ...@@ -1739,8 +1737,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
/* /*
* For SIMPLE and UNTAGGED Task Attribute commands * For SIMPLE and UNTAGGED Task Attribute commands
*/ */
atomic_inc(&dev->simple_cmds); atomic_inc_mb(&dev->simple_cmds);
smp_mb__after_atomic();
break; break;
} }
...@@ -1844,8 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) ...@@ -1844,8 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
return; return;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds); atomic_dec_mb(&dev->simple_cmds);
smp_mb__after_atomic();
dev->dev_cur_ordered_id++; dev->dev_cur_ordered_id++;
pr_debug("Incremented dev->dev_cur_ordered_id: %u for" pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id, " SIMPLE: %u\n", dev->dev_cur_ordered_id,
...@@ -1856,8 +1852,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) ...@@ -1856,8 +1852,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id); cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
atomic_dec(&dev->dev_ordered_sync); atomic_dec_mb(&dev->dev_ordered_sync);
smp_mb__after_atomic();
dev->dev_cur_ordered_id++; dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
...@@ -1915,8 +1910,7 @@ static void transport_handle_queue_full( ...@@ -1915,8 +1910,7 @@ static void transport_handle_queue_full(
{ {
spin_lock_irq(&dev->qf_cmd_lock); spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count); atomic_inc_mb(&dev->dev_qf_count);
smp_mb__after_atomic();
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue); schedule_work(&cmd->se_dev->qf_work_queue);
......
...@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate( ...@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
atomic_inc(&deve->ua_count); atomic_inc_mb(&deve->ua_count);
smp_mb__after_atomic();
return 0; return 0;
} }
list_add_tail(&ua->ua_nacl_list, &deve->ua_list); list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
...@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate( ...@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate(
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq); asc, ascq);
atomic_inc(&deve->ua_count); atomic_inc_mb(&deve->ua_count);
smp_mb__after_atomic();
return 0; return 0;
} }
...@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all( ...@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all(
list_del(&ua->ua_nacl_list); list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua); kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count); atomic_dec_mb(&deve->ua_count);
smp_mb__after_atomic();
} }
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
} }
...@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition( ...@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition(
list_del(&ua->ua_nacl_list); list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua); kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count); atomic_dec_mb(&deve->ua_count);
smp_mb__after_atomic();
} }
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
...@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense( ...@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense(
list_del(&ua->ua_nacl_list); list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua); kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count); atomic_dec_mb(&deve->ua_count);
smp_mb__after_atomic();
} }
spin_unlock(&deve->ua_lock); spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
......
...@@ -903,4 +903,18 @@ struct se_wwn { ...@@ -903,4 +903,18 @@ struct se_wwn {
struct config_group fabric_stat_group; struct config_group fabric_stat_group;
}; };
/*
 * atomic_inc_mb - atomic_inc() bracketed by full memory barriers.
 * @v: atomic counter to increment
 *
 * Orders all memory accesses before the increment against the increment
 * itself, and the increment against all accesses after it.  Introduced so
 * the many open-coded "atomic_inc(); smp_mb__after_atomic();" sites in the
 * target code can be replaced without proving, per call site, which side
 * of the barrier pair is actually required (see commit message).
 */
static inline void atomic_inc_mb(atomic_t *v)
{
smp_mb__before_atomic();
atomic_inc(v);
smp_mb__after_atomic();
}
/*
 * atomic_dec_mb - atomic_dec() bracketed by full memory barriers.
 * @v: atomic counter to decrement
 *
 * Counterpart of atomic_inc_mb(): a fully-ordered decrement.  Deliberately
 * conservative — it adds a barrier *before* the atomic that most converted
 * call sites never had; per the commit message, some of these barriers are
 * likely unnecessary and could be dropped after a per-site audit.
 */
static inline void atomic_dec_mb(atomic_t *v)
{
smp_mb__before_atomic();
atomic_dec(v);
smp_mb__after_atomic();
}
#endif /* TARGET_CORE_BASE_H */ #endif /* TARGET_CORE_BASE_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment