Commit d49f8a52 authored by Linus Torvalds

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual drivers: UFS, esp_scsi, NCR5380,
  qla2xxx, lpfc, libsas, hisi_sas.

  In addition, there's a set of mostly small updates to the target
  subsystem and a set of conversions to the generic DMA API, which do
  have some potential for issues in the older drivers, but we'll handle
  those as case-by-case fixes.

  There's also a new myrs driver for the DAC960/Mylex RAID controllers,
  replacing the block-based DAC960 driver, which is also being removed
  by Jens in this merge window.

  Plus the usual slew of trivial changes"

[ "myrs" stands for "MYlex Raid Scsi". Obviously. Silly of me to even
  wonder. There's also a "myrb" driver, where the 'b' stands for
  'block'. Truly, somebody has got mad naming skillz. - Linus ]

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (237 commits)
  scsi: myrs: Fix the processor absent message in processor_show()
  scsi: myrs: Fix a logical vs bitwise bug
  scsi: hisi_sas: Fix NULL pointer dereference
  scsi: myrs: fix build failure on 32 bit
  scsi: fnic: replace gross legacy tag hack with blk-mq hack
  scsi: mesh: switch to generic DMA API
  scsi: ips: switch to generic DMA API
  scsi: smartpqi: fully convert to the generic DMA API
  scsi: vmw_pscsi: switch to generic DMA API
  scsi: snic: switch to generic DMA API
  scsi: qla4xxx: fully convert to the generic DMA API
  scsi: qla2xxx: fully convert to the generic DMA API
  scsi: qla1280: switch to generic DMA API
  scsi: qedi: fully convert to the generic DMA API
  scsi: qedf: fully convert to the generic DMA API
  scsi: pm8001: switch to generic DMA API
  scsi: nsp32: switch to generic DMA API
  scsi: mvsas: fully convert to the generic DMA API
  scsi: mvumi: switch to generic DMA API
  scsi: mpt3sas: switch to generic DMA API
  ...
parents bd6bf7c1 a0db8a75
......@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
In this version of UFSHCD Query requests and power management
functionality are not implemented.
4. BSG Support
------------------
This transport driver supports exchanging UFS protocol information units
(UPIUs) with a UFS device. Typically, user space will allocate
struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
request_upiu and reply_upiu respectively. Filling those UPIUs should
be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
*Caveat emptor*: The driver makes no further input validation and sends the
UPIU to the device as it is. Open the bsg device in /dev/ufs-bsg and
send SG_IO with the applicable sg_io_v4:
io_hdr_v4.guard = 'Q';
io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
io_hdr_v4.response = (__u64)reply_upiu;
io_hdr_v4.max_response_len = reply_len;
io_hdr_v4.request_len = request_len;
io_hdr_v4.request = (__u64)request_upiu;
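A minimal user-space sketch of the flow above might look as follows. It
assumes the request/reply structures from ufs_bsg.h are available through an
installed UAPI header (the scsi/scsi_bsg_ufs.h path used here is an
assumption), that both UPIUs have already been filled per the JEDEC spec, and
that the bsg node is the /dev/ufs-bsg mentioned above; names and error
handling are illustrative only.

/* Sketch: send one UPIU to a UFS device over the bsg node via SG_IO. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_* */
#include <scsi/sg.h>            /* SG_IO */
#include <scsi/scsi_bsg_ufs.h>  /* struct ufs_bsg_request/reply (assumed path) */

static int send_upiu(struct ufs_bsg_request *request_upiu,
                     struct ufs_bsg_reply *reply_upiu)
{
        struct sg_io_v4 io_hdr_v4;
        int fd, ret;

        fd = open("/dev/ufs-bsg", O_RDWR);
        if (fd < 0)
                return -1;

        memset(&io_hdr_v4, 0, sizeof(io_hdr_v4));
        io_hdr_v4.guard = 'Q';
        io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
        io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
        io_hdr_v4.request = (__u64)(uintptr_t)request_upiu;
        io_hdr_v4.request_len = sizeof(*request_upiu);
        io_hdr_v4.response = (__u64)(uintptr_t)reply_upiu;
        io_hdr_v4.max_response_len = sizeof(*reply_upiu);

        ret = ioctl(fd, SG_IO, &io_hdr_v4);
        close(fd);
        return ret;
}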
UFS Specifications can be found at:
UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
......@@ -4055,7 +4055,7 @@ M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctls.h
F: include/uapi/scsi/cxlflash_ioctl.h
F: Documentation/powerpc/cxlflash.txt
CYBERPRO FB DRIVER
......@@ -10011,6 +10011,13 @@ S: Supported
F: drivers/gpu/drm/mxsfb/
F: Documentation/devicetree/bindings/display/mxsfb.txt
MYLEX DAC960 PCI RAID Controller
M: Hannes Reinecke <hare@kernel.org>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/myrb.*
F: drivers/scsi/myrs.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
......
......@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
U64 LinkFailureCount; /* 50h */
U64 LossOfSyncCount; /* 58h */
U64 LossOfSignalCount; /* 60h */
U64 PrimativeSeqErrCount; /* 68h */
U64 PrimitiveSeqErrCount; /* 68h */
U64 InvalidTxWordCount; /* 70h */
U64 InvalidCrcCount; /* 78h */
U64 FcpInitiatorIoCount; /* 80h */
......
......@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
if ((ioc == NULL))
if (!ioc)
return -1;
pdev = ioc->pcidev;
if ((pdev == NULL))
if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
......@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
u8 phy_num = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 port_width = (u8)(evData0 >> 16);
u8 primative = (u8)(evData0 >> 24);
u8 primitive = (u8)(evData0 >> 24);
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Broadcase Primative: phy=%d port=%d "
"width=%d primative=0x%02x",
phy_num, port_num, port_width, primative);
"SAS Broadcast Primitive: phy=%d port=%d "
"width=%d primitive=0x%02x",
phy_num, port_num, port_width, primitive);
break;
}
......
......@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
......@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
mptsas_broadcast_primative_work(fw_event);
mptsas_broadcast_primitive_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
......@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
}
/**
* mptsas_broadcast_primative_work - Handle broadcast primitives
* mptsas_broadcast_primitive_work - Handle broadcast primitives
* @work: work queue payload containing info describing the event
*
* this will be handled in workqueue context.
*/
static void
mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
......
......@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
......@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
cpu_addr, dma_handle);
goto out;
}
......@@ -1027,16 +1029,16 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
pci_free_consistent(tw_dev->tw_pci_dev,
sizeof(TW_Command_Full)*TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Full) * TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
pci_free_consistent(tw_dev->tw_pci_dev,
TW_SECTOR_SIZE*TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
TW_SECTOR_SIZE * TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */
......@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
}
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
......@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
}
/* Initialize the card */
if (twa_reset_sequence(tw_dev, 0)) {
......
......@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
&dma_handle);
cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
......@@ -899,19 +899,19 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
static void twl_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
pci_free_consistent(tw_dev->tw_pci_dev,
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Full)*TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
pci_free_consistent(tw_dev->tw_pci_dev,
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
TW_SECTOR_SIZE*TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
if (tw_dev->sense_buffer_virt[0])
pci_free_consistent(tw_dev->tw_pci_dev,
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Apache_Header)*
TW_Q_LENGTH,
tw_dev->sense_buffer_virt[0],
......@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
}
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
......@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
}
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
......
......@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
return 1;
}
if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
cpu_addr, dma_handle);
return 1;
}
......@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
/* Free command packet and generic buffer memory */
if (tw_dev->command_packet_virtual_address[0])
pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command) * TW_Q_LENGTH,
tw_dev->command_packet_virtual_address[0],
tw_dev->command_packet_physical_address[0]);
if (tw_dev->alignment_virtual_address[0])
pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Sector) * TW_Q_LENGTH,
tw_dev->alignment_virtual_address[0],
tw_dev->alignment_physical_address[0]);
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
......@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
goto out_disable_device;
......
......@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
#define TW_IOCTL_CHRDEV_FREE -1
#define TW_DMA_MASK DMA_BIT_MASK(32)
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
......
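All of the 3ware changes above follow the same legacy-PCI-to-generic-DMA
mapping. A minimal sketch of that pattern in a hypothetical PCI driver
(my_dev, my_probe, my_remove and BUF_LEN are illustrative names, not from the
patch; the drivers above combine the two mask calls slightly differently):

/* Sketch of the pci_*_consistent() -> dma_*_coherent() conversion pattern. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#define BUF_LEN 4096		/* illustrative buffer size */

struct my_dev {
	void *buf;		/* CPU address of the coherent buffer */
	dma_addr_t buf_dma;	/* matching DMA (bus) address */
};

static int my_probe(struct pci_dev *pdev, struct my_dev *md)
{
	/* Was: pci_set_dma_mask() + pci_set_consistent_dma_mask(); one
	 * call now sets both, trying 64 bit and falling back to 32 bit.
	 */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	/* Was: pci_alloc_consistent(pdev, BUF_LEN, &md->buf_dma); the
	 * generic helper takes the struct device and an explicit gfp_t.
	 */
	md->buf = dma_alloc_coherent(&pdev->dev, BUF_LEN, &md->buf_dma,
				     GFP_KERNEL);
	if (!md->buf)
		return -ENOMEM;
	return 0;
}

static void my_remove(struct pci_dev *pdev, struct my_dev *md)
{
	/* Was: pci_free_consistent(). */
	dma_free_coherent(&pdev->dev, BUF_LEN, md->buf, md->buf_dma);
}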
......@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
/* The SYNC negotiation sequence looks like:
*
* If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
* initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
* initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
* If we get an SDTR reply, work out the SXFER parameters, squirrel
* them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
* DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
......
......@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
dma_addr_t blkp;
while (adapter->alloc_ccbs < adapter->initccbs) {
blk_pointer = pci_alloc_consistent(adapter->pci_device,
blk_size, &blkp);
blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL) {
blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
adapter);
......@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
next_ccb = ccb->next_all;
if (ccb->allocgrp_head) {
if (lastccb)
pci_free_consistent(adapter->pci_device,
dma_free_coherent(&adapter->pci_device->dev,
lastccb->allocgrp_size, lastccb,
lastccb->allocgrp_head);
lastccb = ccb;
}
}
if (lastccb)
pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
lastccb, lastccb->allocgrp_head);
dma_free_coherent(&adapter->pci_device->dev,
lastccb->allocgrp_size, lastccb,
lastccb->allocgrp_head);
}
......@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
if (addl_ccbs <= 0)
return;
while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
blk_pointer = pci_alloc_consistent(adapter->pci_device,
blk_size, &blkp);
blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL)
break;
blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
......@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
if (ccb->command != NULL)
scsi_dma_unmap(ccb->command);
if (dma_unmap)
pci_unmap_single(adapter->pci_device, ccb->sensedata,
ccb->sense_datalen, PCI_DMA_FROMDEVICE);
dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
ccb->sense_datalen, DMA_FROM_DEVICE);
ccb->command = NULL;
ccb->status = BLOGIC_CCB_FREE;
......@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
......@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
......@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
......@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
Release any allocated memory structs not released elsewhere
*/
if (adapter->mbox_space)
pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
adapter->mbox_space, adapter->mbox_space_handle);
pci_dev_put(adapter->pci_device);
adapter->mbox_space = NULL;
......@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
Initialize the Outgoing and Incoming Mailbox pointers.
*/
adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
adapter->mbox_sz, &adapter->mbox_space_handle);
adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
adapter->mbox_sz, &adapter->mbox_space_handle,
GFP_KERNEL);
if (adapter->mbox_space == NULL)
return blogic_failure(adapter, "MAILBOX ALLOCATION");
adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
......@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
ccb->command = command;
sense_buf = pci_map_single(adapter->pci_device,
sense_buf = dma_map_single(&adapter->pci_device->dev,
command->sense_buffer, ccb->sense_datalen,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
blogic_err("DMA mapping for sense data buffer failed\n",
adapter);
......
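The BusLogic hunks above also show the streaming-mapping side of the
conversion: pci_map_single()/pci_unmap_single() with PCI_DMA_FROMDEVICE become
dma_map_single()/dma_unmap_single() with DMA_FROM_DEVICE, and the result is
checked with dma_mapping_error(). A minimal sketch with hypothetical helper
names (map_sense_buf/unmap_sense_buf are not from the patch):

/* Sketch of the streaming-DMA half of the generic DMA API conversion. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

static int map_sense_buf(struct pci_dev *pdev, void *sense_buf, size_t len,
			 dma_addr_t *out)
{
	/* Was: pci_map_single(pdev, sense_buf, len, PCI_DMA_FROMDEVICE). */
	*out = dma_map_single(&pdev->dev, sense_buf, len, DMA_FROM_DEVICE);

	/* Streaming mappings can fail and must be checked. */
	if (dma_mapping_error(&pdev->dev, *out))
		return -ENOMEM;
	return 0;
}

static void unmap_sense_buf(struct pci_dev *pdev, dma_addr_t addr, size_t len)
{
	/* Was: pci_unmap_single(pdev, addr, len, PCI_DMA_FROMDEVICE). */
	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);
}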
......@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
if (currSCCB->Lun == 0x00) {
if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
currTar_Info->TarStatus |=
(unsigned char)SYNC_SUPPORTED;
......@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
~EE_SYNC_MASK;
}
else if ((currSCCB->Sccb_scsistat ==
SELECT_WN_ST)) {
else if (currSCCB->Sccb_scsistat ==
SELECT_WN_ST) {
currTar_Info->TarStatus =
(currTar_Info->
......
......@@ -42,6 +42,9 @@ config SCSI_DMA
bool
default n
config SCSI_ESP_PIO
bool
config SCSI_NETLINK
bool
default n
......@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
config SCSI_MYRB
tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
depends on PCI
select RAID_ATTRS
help
This driver adds support for the Mylex DAC960, AcceleRAID, and
eXtremeRAID PCI RAID controllers. This driver supports the
older, block based interface.
This driver is a reimplementation of the original DAC960
driver. If you have used the DAC960 driver you should enable
this module.
To compile this driver as a module, choose M here: the
module will be called myrb.
config SCSI_MYRS
tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
depends on PCI
select RAID_ATTRS
help
This driver adds support for the Mylex DAC960, AcceleRAID, and
eXtremeRAID PCI RAID controllers. This driver supports the
newer, SCSI-based interface only.
This driver is a reimplementation of the original DAC960
driver. If you have used the DAC960 driver you should enable
this module.
To compile this driver as a module, choose M here: the
module will be called myrs.
config VMWARE_PVSCSI
tristate "VMware PVSCSI driver support"
depends on PCI && SCSI && X86
......@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
tristate "Zorro ESP SCSI support"
depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
select SCSI_ESP_PIO
help
Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
expansion boards for the Amiga.
......@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
select SCSI_SPI_ATTRS
select SCSI_ESP_PIO
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes.
......
......@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
obj-$(CONFIG_SCSI_MYRB) += myrb.o
obj-$(CONFIG_SCSI_MYRS) += myrs.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
......
......@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
static void NCR5380_main(struct work_struct *work);
static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
......
......@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
if (pci_enable_device(pdev))
goto out;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "Unable to set 32bit DMA "
"on inia100 adapter, ignoring.\n");
goto out_disable_device;
......@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
......@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
......@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
out_free_irq:
free_irq(shost->irq, shost);
out_free_escb_array:
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
dma_free_coherent(&pdev->dev,
ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
out_free_scb_array:
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
dma_free_coherent(&pdev->dev,
ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
out_host_put:
scsi_host_put(shost);
......@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_remove_host(shost);
free_irq(shost->irq, shost);
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
dma_free_coherent(&pdev->dev,
ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
dma_free_coherent(&pdev->dev,
ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
......
......@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
static void aac_srb_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_srb_reply *srbreply;
struct scsi_cmnd *scsicmd;
......@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)