Commit d7175373 authored by Mike Christie, committed by Nicholas Bellinger

target: fix ALUA transition timeout handling

The implicit transition time tells initiators the minimum time
to wait before timing out a transition. We currently schedule
the transition to occur in tg_pt_gp_implicit_trans_secs
seconds so there is no room for delays. If
core_alua_do_transition_tg_pt_work->core_alua_update_tpg_primary_metadata
needs to write out info to a remote file, then the initiator can
easily time out the operation.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 207ee841
...@@ -1013,7 +1013,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) ...@@ -1013,7 +1013,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work) static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{ {
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
...@@ -1076,13 +1076,12 @@ static int core_alua_do_transition_tg_pt( ...@@ -1076,13 +1076,12 @@ static int core_alua_do_transition_tg_pt(
/* /*
* Flush any pending transitions * Flush any pending transitions
*/ */
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && if (!explicit && atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
ALUA_ACCESS_STATE_TRANSITION) { ALUA_ACCESS_STATE_TRANSITION) {
/* Just in case */ /* Just in case */
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
tg_pt_gp->tg_pt_gp_transition_complete = &wait; tg_pt_gp->tg_pt_gp_transition_complete = &wait;
flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
wait_for_completion(&wait); wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL; tg_pt_gp->tg_pt_gp_transition_complete = NULL;
return 0; return 0;
...@@ -1117,15 +1116,9 @@ static int core_alua_do_transition_tg_pt( ...@@ -1117,15 +1116,9 @@ static int core_alua_do_transition_tg_pt(
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
unsigned long transition_tmo; if (explicit) {
transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work,
transition_tmo);
} else {
tg_pt_gp->tg_pt_gp_transition_complete = &wait; tg_pt_gp->tg_pt_gp_transition_complete = &wait;
schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, 0);
wait_for_completion(&wait); wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL; tg_pt_gp->tg_pt_gp_transition_complete = NULL;
} }
...@@ -1696,8 +1689,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, ...@@ -1696,8 +1689,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
core_alua_do_transition_tg_pt_work); core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev; tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
...@@ -1805,7 +1798,7 @@ void core_alua_free_tg_pt_gp( ...@@ -1805,7 +1798,7 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--; dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/* /*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by * Allow a struct t10_alua_tg_pt_gp_member * referenced by
......
...@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp { ...@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
struct list_head tg_pt_gp_lun_list; struct list_head tg_pt_gp_lun_list;
struct se_lun *tg_pt_gp_alua_lun; struct se_lun *tg_pt_gp_alua_lun;
struct se_node_acl *tg_pt_gp_alua_nacl; struct se_node_acl *tg_pt_gp_alua_nacl;
struct delayed_work tg_pt_gp_transition_work; struct work_struct tg_pt_gp_transition_work;
struct completion *tg_pt_gp_transition_complete; struct completion *tg_pt_gp_transition_complete;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment