Commit c8b09f6f authored by Christoph Hellwig

scsi: don't set tagging state from scsi_adjust_queue_depth

Remove the tagged argument from scsi_adjust_queue_depth, and just let it
handle the queue depth.  For most drivers those two are fairly separate,
given that most modern drivers don't care about the SCSI "tagged" status
of a command at all, and many old drivers allow queuing of multiple
untagged commands in the driver.

Instead we start out with the ->simple_tags flag set before calling
->slave_configure, which is how all drivers that actually look at
->simple_tags, except for one, worked anyway.  The one other case looks
broken, but I've kept its behavior as-is for now.

Other than that we only change ->simple_tags from the ->change_queue_type
handler, and when rejecting a tag message in a single driver, so keeping
this churn out of scsi_adjust_queue_depth is a clear win.

Now that the usage of scsi_adjust_queue_depth is more obvious we can
also remove all the trivial instances in ->slave_alloc or ->slave_configure
that just set it to the cmd_per_lun default.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Mike Christie <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Martin K. Petersen <[email protected]>
parent 2ecb204d
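
To make the interface change concrete, here is a minimal, hypothetical sketch
of an LLD ->change_queue_depth handler after this patch, mirroring the pattern
used by the drivers touched below (zfcp, libfc, libiscsi).  The
example_change_queue_depth name and EXAMPLE_MAX_QDEPTH limit are invented for
illustration only; the commented-out line shows the old call that passed a tag
type:

/*
 * Hypothetical sketch, not part of this patch: a typical LLD
 * ->change_queue_depth handler after the interface change.
 */
#include <linux/errno.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#define EXAMPLE_MAX_QDEPTH	64	/* made-up per-LUN limit */

static int example_change_queue_depth(struct scsi_device *sdev, int qdepth,
				      int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		if (qdepth > EXAMPLE_MAX_QDEPTH)
			qdepth = EXAMPLE_MAX_QDEPTH;
		/* old: scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); */
		scsi_adjust_queue_depth(sdev, qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}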
......@@ -271,9 +271,9 @@ init_this_scsi_driver() ----+
slave_destroy() ***
------------------------------------------------------------
The mid level invokes scsi_adjust_queue_depth() with tagged queuing off and
"cmd_per_lun" for that host as the queue length. These settings can be
overridden by a slave_configure() supplied by the LLD.
The mid level invokes scsi_adjust_queue_depth() with "cmd_per_lun" for that
host as the queue length. These settings can be overridden by a
slave_configure() supplied by the LLD.
*** For scsi devices that the mid level tries to scan but do not
respond, a slave_alloc(), slave_destroy() pair is called.
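
As a hedged illustration of the override described above, a slave_configure()
can simply pick a larger depth than the cmd_per_lun default set by the mid
level.  The example_slave_configure name and EXAMPLE_QUEUE_DEPTH value are
invented for this sketch, modelled on the drivers changed in this patch:

/* Hypothetical sketch, not from this patch. */
#include <scsi/scsi_device.h>

#define EXAMPLE_QUEUE_DEPTH	32	/* made-up depth for this sketch */

static int example_slave_configure(struct scsi_device *sdev)
{
	/*
	 * The mid level has already set the depth to the host's
	 * cmd_per_lun; raise it when the device can queue commands.
	 */
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, EXAMPLE_QUEUE_DEPTH);
	return 0;
}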
......@@ -438,9 +438,6 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
/**
* scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device
* @sdev: pointer to SCSI device to change queue depth on
* @tagged: 0 - no tagged queuing
* MSG_SIMPLE_TAG - simple tagged queuing
* MSG_ORDERED_TAG - ordered tagged queuing
* @tags Number of tags allowed if tagged queuing enabled,
* or number of commands the LLD can queue up
* in non-tagged mode (as per cmd_per_lun).
......@@ -456,8 +453,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
* Defined in: drivers/scsi/scsi.c [see source code for more notes]
*
**/
void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
int tags)
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tags)
/**
......
......@@ -1164,7 +1164,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
depth = min(ATA_MAX_QUEUE - 1, depth);
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
scsi_adjust_queue_depth(sdev, depth);
}
blk_queue_flush_queueable(q, false);
......@@ -1282,7 +1282,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
if (sdev->queue_depth == queue_depth)
return -EINVAL;
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
}
......
......@@ -2278,7 +2278,7 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
} else if (reason == SCSI_QDEPTH_QFULL)
scsi_track_queue_full(sdev, qdepth);
else
......
......@@ -2347,7 +2347,7 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
if (qdepth > max_depth)
qdepth = max_depth;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......
......@@ -37,13 +37,13 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
default:
return -EOPNOTSUPP;
......@@ -66,9 +66,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
scsi_adjust_queue_depth(sdp, default_depth);
return 0;
}
......
......@@ -198,7 +198,7 @@ static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End twa_change_queue_depth() */
......
......@@ -200,7 +200,7 @@ static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End twl_change_queue_depth() */
......
......@@ -532,7 +532,7 @@ static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
if (queue_depth > TW_Q_LENGTH-2)
queue_depth = TW_Q_LENGTH-2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
} /* End tw_change_queue_depth() */
......
......@@ -902,8 +902,10 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
/* we're done negotiating */
NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
SCp->device->tagged_supported = 0;
scsi_adjust_queue_depth(SCp->device, 0, host->cmd_per_lun);
scsi_adjust_queue_depth(SCp->device, host->cmd_per_lun);
scsi_set_tag_type(SCp->device, 0);
} else {
shost_printk(KERN_WARNING, host,
"(%d:%d) Unexpected REJECT Message %s\n",
......@@ -2050,12 +2052,10 @@ NCR_700_slave_configure(struct scsi_device *SDp)
/* to do here: allocate memory; build a queue_full list */
if(SDp->tagged_supported) {
scsi_adjust_queue_depth(SDp, MSG_ORDERED_TAG, NCR_700_DEFAULT_TAGS);
scsi_adjust_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
} else {
/* initialise to default depth */
scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
}
if(hostdata->fast) {
/* Find the correct offset and period via domain validation */
if (!spi_initial_dv(SDp->sdev_target))
......@@ -2083,7 +2083,7 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason)
if (depth > NCR_700_MAX_TAGS)
depth = NCR_700_MAX_TAGS;
scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
scsi_adjust_queue_depth(SDp, depth);
return depth;
}
......@@ -2101,15 +2101,16 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
if (change_tag)
scsi_target_quiesce(SDp->sdev_target);
scsi_set_tag_type(SDp, tag_type);
if (!tag_type) {
/* shift back to the default unqueued number of commands
* (the user can still raise this) */
scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
scsi_adjust_queue_depth(SDp, SDp->host->cmd_per_lun);
hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
} else {
/* Here, we cleared the negotiation flag above, so this
* will force the driver to renegotiate */
scsi_adjust_queue_depth(SDp, tag_type, SDp->queue_depth);
scsi_adjust_queue_depth(SDp, SDp->queue_depth);
if (change_tag)
NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
}
......
......@@ -2327,12 +2327,12 @@ static int blogic_slaveconfig(struct scsi_device *dev)
if (qdepth == 0)
qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH;
adapter->qdepth[tgt_id] = qdepth;
scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qdepth);
scsi_adjust_queue_depth(dev, qdepth);
} else {
adapter->tagq_ok &= ~(1 << tgt_id);
qdepth = adapter->untag_qdepth;
adapter->qdepth[tgt_id] = qdepth;
scsi_adjust_queue_depth(dev, 0, qdepth);
scsi_adjust_queue_depth(dev, qdepth);
}
qdepth = 0;
for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
......
......@@ -462,9 +462,9 @@ static int aac_slave_configure(struct scsi_device *sdev)
depth = 256;
else if (depth < 2)
depth = 2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
scsi_adjust_queue_depth(sdev, depth);
} else
scsi_adjust_queue_depth(sdev, 0, 1);
scsi_adjust_queue_depth(sdev, 1);
return 0;
}
......@@ -504,9 +504,9 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
depth = 256;
else if (depth < 2)
depth = 2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
scsi_adjust_queue_depth(sdev, depth);
} else
scsi_adjust_queue_depth(sdev, 0, 1);
scsi_adjust_queue_depth(sdev, 1);
return sdev->queue_depth;
}
......
......@@ -7706,7 +7706,7 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
asc_dvc->cfg->can_tagged_qng |= tid_bit;
asc_dvc->use_tagged_qng |= tid_bit;
}
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
scsi_adjust_queue_depth(sdev,
asc_dvc->max_dvc_qng[sdev->id]);
}
} else {
......@@ -7714,7 +7714,6 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
asc_dvc->use_tagged_qng &= ~tid_bit;
}
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
}
if ((sdev->lun == 0) &&
......@@ -7849,10 +7848,8 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
}
if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) {
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
scsi_adjust_queue_depth(sdev,
adv_dvc->max_dvc_qng);
} else {
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
}
}
......
......@@ -1469,11 +1469,8 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
case AHD_DEV_Q_BASIC:
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TASK,
dev->openings + dev->active);
break;
case AHD_DEV_Q_TAGGED:
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TASK,
scsi_adjust_queue_depth(sdev,
dev->openings + dev->active);
break;
default:
......@@ -1483,7 +1480,7 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
* serially on the controller/device. This should
* remove some latency.
*/
scsi_adjust_queue_depth(sdev, 0, 1);
scsi_adjust_queue_depth(sdev, 1);
break;
}
}
......
......@@ -1335,13 +1335,9 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
}
switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
case AHC_DEV_Q_BASIC:
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TASK,
dev->openings + dev->active);
break;
case AHC_DEV_Q_TAGGED:
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TASK,
scsi_adjust_queue_depth(sdev,
dev->openings + dev->active);
break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
......@@ -1349,7 +1345,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
* serially on the controller/device. This should
* remove some latency.
*/
scsi_adjust_queue_depth(sdev, 0, 2);
scsi_adjust_queue_depth(sdev, 2);
break;
}
}
......
......@@ -122,7 +122,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
queue_depth = ARCMSR_MAX_CMD_PERLUN;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
}
......
......@@ -776,7 +776,7 @@ bfad_thread_workq(struct bfad_s *bfad)
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
scsi_adjust_queue_depth(sdev, 0, bfa_lun_queue_depth);
scsi_adjust_queue_depth(sdev, bfa_lun_queue_depth);
return 0;
}
......@@ -867,7 +867,6 @@ bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
if (tmp_sdev->id != sdev->id)
continue;
scsi_adjust_queue_depth(tmp_sdev,
MSG_SIMPLE_TAG,
tmp_sdev->queue_depth + 1);
itnim->last_ramp_up_time = jiffies;
......
......@@ -2241,7 +2241,7 @@ csio_slave_alloc(struct scsi_device *sdev)
static int
csio_slave_configure(struct scsi_device *sdev)
{
scsi_adjust_queue_depth(sdev, 0, csio_lun_qdepth);
scsi_adjust_queue_depth(sdev, csio_lun_qdepth);
return 0;
}
......
......@@ -415,10 +415,8 @@ static int adpt_slave_configure(struct scsi_device * device)
pHba = (adpt_hba *) host->hostdata[0];
if (host->can_queue && device->tagged_supported) {
scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
scsi_adjust_queue_depth(device,
host->can_queue - 1);
} else {
scsi_adjust_queue_depth(device, 0, 1);
}
return 0;
}
......
......@@ -946,20 +946,18 @@ static int eata2x_slave_configure(struct scsi_device *dev)
if (TLDEV(dev->type) && dev->tagged_supported) {
if (tag_mode == TAG_SIMPLE) {
scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd);
tag_suffix = ", simple tags";
} else if (tag_mode == TAG_ORDERED) {
scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd);
tag_suffix = ", ordered tags";
} else {
scsi_adjust_queue_depth(dev, 0, tqd);
tag_suffix = ", no tags";
}
scsi_adjust_queue_depth(dev, tqd);
} else if (TLDEV(dev->type) && linked_comm) {
scsi_adjust_queue_depth(dev, 0, tqd);
scsi_adjust_queue_depth(dev, tqd);
tag_suffix = ", untagged";
} else {
scsi_adjust_queue_depth(dev, 0, utqd);
scsi_adjust_queue_depth(dev, utqd);
tag_suffix = "";
}
......
......@@ -972,9 +972,6 @@ u8 handle_hba_ioctl(struct esas2r_adapter *a,
struct atto_ioctl *ioctl_hba);
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
int esas2r_slave_alloc(struct scsi_device *dev);
int esas2r_slave_configure(struct scsi_device *dev);
void esas2r_slave_destroy(struct scsi_device *dev);
int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
......
......@@ -254,9 +254,6 @@ static struct scsi_host_template driver_template = {
.use_clustering = ENABLE_CLUSTERING,
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.slave_configure = esas2r_slave_configure,
.slave_alloc = esas2r_slave_alloc,
.slave_destroy = esas2r_slave_destroy,
.change_queue_depth = esas2r_change_queue_depth,
.change_queue_type = scsi_change_queue_type,
.max_sectors = 0xFFFF,
......@@ -1264,35 +1261,11 @@ int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
{
esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
scsi_adjust_queue_depth(dev, depth);
return dev->queue_depth;
}
int esas2r_slave_alloc(struct scsi_device *dev)
{
return 0;
}
int esas2r_slave_configure(struct scsi_device *dev)
{
esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
"esas2r_slave_configure()");
if (dev->tagged_supported)
scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, cmd_per_lun);
else
scsi_adjust_queue_depth(dev, 0, cmd_per_lun);
return 0;
}
void esas2r_slave_destroy(struct scsi_device *dev)
{
esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
"esas2r_slave_destroy()");
}
void esas2r_log_request_failure(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
......
......@@ -2402,27 +2402,14 @@ static int esp_slave_configure(struct scsi_device *dev)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
goal_tags = 0;
if (dev->tagged_supported) {
/* XXX make this configurable somehow XXX */
goal_tags = ESP_DEFAULT_TAGS;
int goal_tags = min(ESP_DEFAULT_TAGS, ESP_MAX_TAG);
if (goal_tags > ESP_MAX_TAG)
goal_tags = ESP_MAX_TAG;
scsi_adjust_queue_depth(dev, goal_tags);
}
queue_depth = goal_tags;
if (queue_depth < dev->host->cmd_per_lun)
queue_depth = dev->host->cmd_per_lun;
if (goal_tags) {
scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, queue_depth);
} else {
scsi_adjust_queue_depth(dev, 0, queue_depth);
}
tp->flags |= ESP_TGT_DISCONNECT;
if (!spi_initial_dv(dev->sdev_target))
......
......@@ -100,7 +100,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
scsi_adjust_queue_depth(sdev, 0, fnic_max_qdepth);
scsi_adjust_queue_depth(sdev, fnic_max_qdepth);
return 0;
}
......
......@@ -4661,7 +4661,6 @@ static void gdth_flush(gdth_ha_str *ha)
/* configure lun */
static int gdth_slave_configure(struct scsi_device *sdev)
{
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
sdev->skip_ms_page_3f = 1;
sdev->skip_ms_page_8 = 1;
return 0;
......
......@@ -4165,7 +4165,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev,
else
if (qdepth > h->nr_cmds)
qdepth = h->nr_cmds;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......
......@@ -1127,7 +1127,7 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
if (queue_depth > hba->max_requests)
queue_depth = hba->max_requests;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
}
......
......@@ -2887,12 +2887,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
if (sdev->type == TYPE_DISK)
sdev->allow_restart = 1;
if (sdev->tagged_supported)
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
sdev->queue_depth);
else
scsi_adjust_queue_depth(sdev, 0, sdev->queue_depth);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
......@@ -2915,7 +2909,7 @@ static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
qdepth = IBMVFC_MAX_CMDS_PER_LUN;
scsi_adjust_queue_depth(sdev, 0, qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......
......@@ -1929,7 +1929,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
return 0;
}
......@@ -1951,7 +1950,7 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
scsi_adjust_queue_depth(sdev, 0, qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......
......@@ -4344,7 +4344,7 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
qdepth = IPR_MAX_CMD_PER_ATA_LUN;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......@@ -4751,10 +4751,10 @@ static int ipr_slave_configure(struct scsi_device *sdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
if (ap) {
scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
scsi_adjust_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
ata_sas_slave_configure(sdev, ap);
} else
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
}
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
ipr_format_res_path(ioa_cfg,
......
......@@ -1210,7 +1210,7 @@ ips_slave_configure(struct scsi_device * SDptr)
min = ha->max_cmds / 2;
if (ha->enq->ucLogDriveCount <= 2)
min = ha->max_cmds - 1;
scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min);
scsi_adjust_queue_depth(SDptr, min);
}
SDptr->skip_ms_page_8 = 1;
......
......@@ -2160,7 +2160,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
scsi_adjust_queue_depth(sdev, 0, FC_FCP_DFLT_QUEUE_DEPTH);
scsi_adjust_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
......@@ -2175,13 +2175,13 @@ int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, qdepth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
break;
default:
return -EOPNOTSUPP;
......
......@@ -1775,13 +1775,13 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
default:
return -EOPNOTSUPP;
......
......@@ -940,13 +940,13 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
sas_read_port_mode_page(scsi_dev);
if (scsi_dev->tagged_supported) {
scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG, SAS_DEF_QD);
scsi_adjust_queue_depth(scsi_dev, SAS_DEF_QD);
} else {
SAS_DPRINTK("device %llx, LUN %llx doesn't support "
"TCQ\n", SAS_ADDR(dev->sas_addr),
scsi_dev->lun);
scsi_dev->tagged_supported = 0;
scsi_adjust_queue_depth(scsi_dev, 0, 1);
scsi_adjust_queue_depth(scsi_dev, 1);
}
scsi_dev->allow_restart = 1;
......@@ -967,7 +967,7 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
case SCSI_QDEPTH_RAMP_UP:
if (!sdev->tagged_supported)
depth = 1;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
scsi_adjust_queue_depth(sdev, depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
......@@ -979,19 +979,11 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
return depth;
}
int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
return -EINVAL;
if (!scsi_dev->tagged_supported)
return 0;
scsi_adjust_queue_depth(scsi_dev, qt, scsi_dev->queue_depth);
return qt;
return scsi_change_queue_type(scsi_dev, type);
}
int sas_bios_param(struct scsi_device *scsi_dev,
......
......@@ -320,7 +320,7 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
case SCSI_QDEPTH_DEFAULT:
/* change request from sysfs, fall through */
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
break;
case SCSI_QDEPTH_QFULL:
if (scsi_track_queue_full(sdev, qdepth) == 0)
......@@ -5598,7 +5598,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
scsi_adjust_queue_depth(sdev, 0, vport->cfg_lun_queue_depth);
scsi_adjust_queue_depth(sdev, vport->cfg_lun_queue_depth);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
......
......@@ -349,7 +349,7 @@ static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
if (qdepth > MBOX_MAX_SCSI_CMDS)
qdepth = MBOX_MAX_SCSI_CMDS;
scsi_adjust_queue_depth(sdev, 0, qdepth);
scsi_adjust_queue_depth(sdev, qdepth);
return sdev->queue_depth;
}
......
......@@ -2594,8 +2594,7 @@ static int megasas_change_queue_depth(struct scsi_device *sdev,
if (queue_depth > sdev->host->can_queue)
queue_depth = sdev->host->can_queue;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
queue_depth);
scsi_adjust_queue_depth(sdev, queue_depth);
return queue_depth;
}
......
......@@ -1222,7 +1222,7 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = 1;