/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 50;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 *   - Either context is IRQ and only IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

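/* Map a target-core protection operation to a printable name for debug logs */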
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key  = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] <<  8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
			   "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

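/*
 * Track outstanding target-mode commands: the pair below adjusts
 * hw->tgt.num_pend_cmds under q_full_lock and records the high-water
 * mark in qla_stats.stat_max_pend_cmds.
 */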
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

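/*
 * Park an ATIO whose d_id doesn't match any known host yet on
 * vha->unknown_atio_list. A delayed work retries delivery; if the target
 * is being stopped, the exchange is terminated instead.
 */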
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio,	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
			   "qla_target(%d): dropping unknown ATIO_TYPE7, "
			   "because tgt is being stopped", vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u));
		/* Should be harmless; the next retry should succeed */
		goto out_term;
	}

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Freeing unknown %s %p, because of Abort",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
				"Requeuing unknown ATIO_TYPE7 %p", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
				"Freeing unknown %s %p, because tgt is being stopped",
				"ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async, vha, 0xffff,
				"u %p, vha %p, host %p, sched again..", u,
				vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

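/*
 * Route an incoming ATIO to the scsi_qla_host that owns it, looked up by
 * d_id for ATIO_TYPE7 entries or by vp_index for the other entry types.
 */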
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xffff,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
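		/* fall through */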
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
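/*
 * Defer sending a notify-ack by posting a QLA_EVT_NACK work event;
 * qla24xx_do_nack_work() processes it from the work queue.
 */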
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
		"Async done-%s res %x %8phC  type %d\n",
		sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
					"%s %d %8phC post upd_fcport fcp_cnt %d\n",
					__func__, __LINE__,
					sp->fcport->port_name,
					vha->fcport_count);

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
					"%s %d %8phC post gpsc fcp_cnt %d\n",
					__func__, __LINE__,
					sp->fcport->port_name,
					vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
		"Async-%s %8phC hndl %x %s\n",
		sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xffff,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

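/*
 * Drop one reference to a qlt_plogi_ack_t. On the final put the deferred
 * PLOGI ACK is finally sent (as an SRB_NACK_PLOGI), all fc_port links to
 * the entry are cleared and the entry is freed.
 */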
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

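/*
 * Link a session to a pending PLOGI ACK, taking a reference on the ack
 * entry; the ACK is held back until the session drops that reference
 * again (see qlt_free_session_done()).
 */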
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

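/*
 * Work handler that finishes session teardown: it sends an ELS LOGO when
 * requested, waits for a scheduled logout to complete, acks a pending
 * LOGO, releases any linked PLOGI ACKs and drops the session from the
 * target's bookkeeping.
 */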
static void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	struct event_arg ea;
	scsi_qla_host_t *base_vha;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete) {
			int rc;

			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n",__func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;
	sess->login_retry = vha->hw->login_retry_count;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	if (sess->chip_reset != sess->vha->hw->chip_reset)
		qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *own =
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);
	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if (!tgt || !tgt->tgt_stop) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_DELETE_DONE;
		ea.fcport = sess;
		qla2x00_fcport_event_handler(vha, &ea);
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

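/*
 * Handle a firmware-requested reset: nport_handle 0xFFFF is a global
 * event and clears the whole session database; otherwise a task
 * management function is issued against the matching session.
 */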
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

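/*
 * After a chip reset the old login state and nport handle are gone, so
 * clear the flags that would otherwise make teardown attempt a logout.
 */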
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;

	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	sess->disc_state = DSC_DELETE_PEND;

	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	schedule_work(&sess->del_work);
}

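/* Wrapper that takes ha->tgt.sess_lock around qlt_schedule_sess_for_deletion() */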
void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
	unsigned long flags;
	struct qla_hw_data *ha = sess->vha->hw;
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess, 1);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to log out from firmware when
	 * the session eventually ends and release the corresponding nport
	 * handle. In exceptional cases (e.g. when a new PLOGI is waiting) the
	 * corresponding code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

1498
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * that we generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);