diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
deleted file mode 100644
index 3a5477cc456efd25168fac0df6236b4e91507435..0000000000000000000000000000000000000000
--- a/Documentation/blockdev/cciss.txt
+++ /dev/null
@@ -1,194 +0,0 @@
-This driver is for Compaq's SMART Array Controllers.
-
-Supported Cards:
-----------------
-
-This driver is known to work with the following cards:
-
-	* SA 5300
-	* SA 5i 
-	* SA 532
-	* SA 5312
-	* SA 641
-	* SA 642
-	* SA 6400
-	* SA 6400 U320 Expansion Module
-	* SA 6i
-	* SA P600
-	* SA P800
-	* SA E400
-	* SA P400i
-	* SA E200
-	* SA E200i
-	* SA E500
-	* SA P700m
-	* SA P212
-	* SA P410
-	* SA P410i
-	* SA P411
-	* SA P812
-	* SA P712m
-	* SA P711m
-
-Detecting drive failures:
--------------------------
-
-To get the status of logical volumes and to detect physical drive
-failures, you can use the cciss_vol_status program found here:
-http://cciss.sourceforge.net/#cciss_utils
-
-Device Naming:
---------------
-
-If nodes are not already created in the /dev/cciss directory, run as root:
-
-# cd /dev
-# ./MAKEDEV cciss
-
-You need some entries in /dev for the cciss device.  The MAKEDEV script
-can make device nodes for you automatically.  Currently the device setup
-is as follows:
-
-Major numbers:
-	104	cciss0	
-	105	cciss1	
-	106	cciss2
-	105	cciss3
-	108	cciss4
-	109	cciss5
-	110	cciss6
-	111	cciss7
-
-Minor numbers:
-        b7 b6 b5 b4 b3 b2 b1 b0
-        |----+----| |----+----|
-             |           |
-             |           +-------- Partition ID (0=wholedev, 1-15 partition)
-             |
-             +-------------------- Logical Volume number
-
-The device naming scheme is:
-/dev/cciss/c0d0			Controller 0, disk 0, whole device
-/dev/cciss/c0d0p1		Controller 0, disk 0, partition 1
-/dev/cciss/c0d0p2		Controller 0, disk 0, partition 2
-/dev/cciss/c0d0p3		Controller 0, disk 0, partition 3
-
-/dev/cciss/c1d1			Controller 1, disk 1, whole device
-/dev/cciss/c1d1p1		Controller 1, disk 1, partition 1
-/dev/cciss/c1d1p2		Controller 1, disk 1, partition 2
-/dev/cciss/c1d1p3		Controller 1, disk 1, partition 3
-
-CCISS simple mode support
--------------------------
-
-The "cciss_simple_mode=1" boot parameter may be used to prevent the driver
-from putting the controller into "performant" mode. The difference is that
-with simple mode, each command completion requires an interrupt, while with
-"performant mode" (the default, and ordinarily better performing) it is
-possible to have multiple command completions indicated by a single
-interrupt.
-
-SCSI tape drive and medium changer support
-------------------------------------------
-
-SCSI sequential access devices and medium changer devices are supported and 
-appropriate device nodes are automatically created.  (e.g.  
-/dev/st0, /dev/st1, etc.  See the "st" man page for more details.) 
-You must enable "SCSI tape drive support for Smart Array 5xxx" and 
-"SCSI support" in your kernel configuration to be able to use SCSI
-tape drives with your Smart Array 5xxx controller.
-
-Additionally, note that the driver will engage the SCSI core at init
-time if any tape drives or medium changers are detected.  The driver may
-also be directed to dynamically engage the SCSI core via the /proc filesystem
-entry which the "block" side of the driver creates as
-/proc/driver/cciss/cciss* at runtime.  This is best done via a script.
-
-For example:
-
-	for x in /proc/driver/cciss/cciss[0-9]*
-	do
-		echo "engage scsi" > $x
-	done
-
-Once the SCSI core is engaged by the driver, it cannot be disengaged 
-(except by unloading the driver, if it happens to be linked as a module.)
-
-Note also that if no sequential access devices or medium changers are
-detected, the SCSI core will not be engaged by the action of the above
-script.
-
-Hot plug support for SCSI tape drives
--------------------------------------
-
-Hot plugging of SCSI tape drives is supported, with some caveats.
-The cciss driver must be informed that changes to the SCSI bus
-have been made.  This may be done via the /proc filesystem.
-For example:
-
-	echo "rescan" > /proc/scsi/cciss0/1
-
-This causes the driver to query the adapter about changes to the
-physical SCSI buses and/or fibre channel arbitrated loop and the
-driver to make note of any new or removed sequential access devices
-or medium changers.  The driver will output messages indicating what 
-devices have been added or removed and the controller, bus, target and 
-lun used to address the device.  It then notifies the SCSI mid layer
-of these changes.
-
-Note that the naming convention of the /proc filesystem entries 
-contains a number in addition to the driver name.  (E.g. "cciss0" 
-instead of just "cciss" which you might expect.)
-
-Note: ONLY sequential access devices and medium changers are presented 
-as SCSI devices to the SCSI mid layer by the cciss driver.  Specifically, 
-physical SCSI disk drives are NOT presented to the SCSI mid layer.  The 
-physical SCSI disk drives are controlled directly by the array controller 
-hardware and it is important to prevent the kernel from attempting to directly
-access these devices too, as if the array controller were merely a SCSI 
-controller in the same way that we are allowing it to access SCSI tape drives.
-
-SCSI error handling for tape drives and medium changers
--------------------------------------------------------
-
-The linux SCSI mid layer provides an error handling protocol which
-kicks into gear whenever a SCSI command fails to complete within a
-certain amount of time (which can vary depending on the command).
-The cciss driver participates in this protocol to some extent.  The
-normal protocol is a four step process.  First the device is told
-to abort the command.  If that doesn't work, the device is reset.
-If that doesn't work, the SCSI bus is reset.  If that doesn't work
-the host bus adapter is reset.  Because the cciss driver is a block
-driver as well as a SCSI driver and only the tape drives and medium
-changers are presented to the SCSI mid layer, and unlike more 
-straightforward SCSI drivers, disk i/o continues through the block
-side during the SCSI error recovery process, the cciss driver only
-implements the first two of these actions, aborting the command, and
-resetting the device.  Additionally, most tape drives will not oblige 
-in aborting commands, and sometimes it appears they will not even 
-obey a reset command, though in most circumstances they will.  In
-the case that the command cannot be aborted and the device cannot be 
-reset, the device will be set offline.
-
-In the event the error handling code is triggered and a tape drive is
-successfully reset or the tardy command is successfully aborted, the 
-tape drive may still not allow i/o to continue until some command
-is issued which positions the tape to a known position.  Typically you
-must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example)
-before i/o can proceed again to a tape drive which was reset.
-
-There is a cciss_tape_cmds module parameter which can be used to make cciss
-allocate more commands for use by tape drives.  Ordinarily only a few commands
-(6) are allocated for tape drives because tape drives are slow and
-infrequently used and the primary purpose of Smart Array controllers is to
-act as a RAID controller for disk drives, so the vast majority of commands
-are allocated for disk devices.  However, if you have more than a few tape
-drives attached to a smart array, the default number of commands may not be
-enough (for example, if you have 8 tape drives, you could only rewind 6
-at one time with the default number of commands.)  The cciss_tape_cmds module
-parameter allows more commands (up to 16 more) to be allocated for use by
-tape drives.  For example:
-
-        insmod cciss.ko cciss_tape_cmds=16
-
-Or, as a kernel boot parameter passed in via grub:  cciss.cciss_tape_cmds=8
diff --git a/MAINTAINERS b/MAINTAINERS
index 259d32d8c84b4e3c47bb947d6d365afa1151b0f3..bf206bd9f056c6fdc9d49f2d24d9f7a634c12db5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6093,16 +6093,6 @@ F:	drivers/scsi/hpsa*.[ch]
 F:	include/linux/cciss*.h
 F:	include/uapi/linux/cciss*.h
 
-HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
-M:	Don Brace <don.brace@microsemi.com>
-L:	esc.storagedev@microsemi.com
-L:	linux-scsi@vger.kernel.org
-S:	Supported
-F:	Documentation/blockdev/cciss.txt
-F:	drivers/block/cciss*
-F:	include/linux/cciss_ioctl.h
-F:	include/uapi/linux/cciss_ioctl.h
-
 HFI1 DRIVER
 M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
 M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
@@ -11609,6 +11599,7 @@ F:	drivers/s390/crypto/
 
 S390 ZFCP DRIVER
 M:	Steffen Maier <maier@linux.vnet.ibm.com>
+M:	Benjamin Block <bblock@linux.vnet.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -13695,8 +13686,7 @@ F:	Documentation/scsi/ufs.txt
 F:	drivers/scsi/ufs/
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
-M:	Manjunath M Bettegowda <manjumb@synopsys.com>
-M:	Prabu Thangamuthu <prabut@synopsys.com>
+M:	Joao Pinto <jpinto@synopsys.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/ufs/*dwc*
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index dd56d7460cb91d504aa22ae9215d00f97c844359..c82408c7cc3c91181f91d32c0147e18781506691 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -239,8 +239,9 @@ static void bsg_exit_rq(struct request_queue *q, struct request *req)
  * @job_fn: bsg job handler
  * @dd_job_size: size of LLD data needed for each job
  */
-struct request_queue *bsg_setup_queue(struct device *dev, char *name,
-		bsg_job_fn *job_fn, int dd_job_size)
+struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+		bsg_job_fn *job_fn, int dd_job_size,
+		void (*release)(struct device *))
 {
 	struct request_queue *q;
 	int ret;
@@ -264,7 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	ret = bsg_register_queue(q, dev, name, NULL);
+	ret = bsg_register_queue(q, dev, name, release);
 	if (ret) {
 		printk(KERN_ERR "%s: bsg interface failed to "
 		       "initialize - register queue\n", dev->kobj.name);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 104180e3c55e317e76480c8384ca4297acc6b8f3..4a438b8abe27ad9c1774c89f608bd47fecc295a3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -112,33 +112,6 @@ source "drivers/block/mtip32xx/Kconfig"
 
 source "drivers/block/zram/Kconfig"
 
-config BLK_CPQ_CISS_DA
-	tristate "Compaq Smart Array 5xxx support"
-	depends on PCI
-	select CHECK_SIGNATURE
-	select BLK_SCSI_REQUEST
-	help
-	  This is the driver for Compaq Smart Array 5xxx controllers.
-	  Everyone using these boards should say Y here.
-	  See <file:Documentation/blockdev/cciss.txt> for the current list of
-	  boards supported by this driver, and for further information
-	  on the use of this driver.
-
-config CISS_SCSI_TAPE
-	bool "SCSI tape drive support for Smart Array 5xxx"
-	depends on BLK_CPQ_CISS_DA && PROC_FS
-	depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
-	help
-	  When enabled (Y), this option allows SCSI tape drives and SCSI medium
-	  changers (tape robots) to be accessed via a Compaq 5xxx array 
-	  controller.  (See <file:Documentation/blockdev/cciss.txt> for more details.)
-
-	  "SCSI support" and "SCSI tape support" must also be enabled for this 
-	  option to work.
-
-	  When this option is disabled (N), the SCSI portion of the driver 
-	  is not compiled.
-
 config BLK_DEV_DAC960
 	tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
 	depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ec8c36897b753f1c558c5db801807820515b5b35..1f456d86a1907c3085e3ff558f7fd285b3af76f3 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY)	+= ataflop.o
 obj-$(CONFIG_AMIGA_Z2RAM)	+= z2ram.o
 obj-$(CONFIG_BLK_DEV_RAM)	+= brd.o
 obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
-obj-$(CONFIG_BLK_CPQ_CISS_DA)  += cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
 obj-$(CONFIG_XILINX_SYSACE)	+= xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
deleted file mode 100644
index 678af946be30c9b9a0861e335a32952e07aa8410..0000000000000000000000000000000000000000
--- a/drivers/block/cciss.c
+++ /dev/null
@@ -1,5415 +0,0 @@
-/*
- *    Disk Array driver for HP Smart Array controllers.
- *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- *    General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- *    02111-1307, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/bio.h>
-#include <linux/blkpg.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/hdreg.h>
-#include <linux/spinlock.h>
-#include <linux/compat.h>
-#include <linux/mutex.h>
-#include <linux/bitmap.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/completion.h>
-#include <scsi/scsi.h>
-#include <scsi/sg.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_request.h>
-#include <linux/cdrom.h>
-#include <linux/scatterlist.h>
-#include <linux/kthread.h>
-
-#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
-
-/* Embedded module documentation macros - see modules.h */
-MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
-MODULE_VERSION("3.6.26");
-MODULE_LICENSE("GPL");
-static int cciss_tape_cmds = 6;
-module_param(cciss_tape_cmds, int, 0644);
-MODULE_PARM_DESC(cciss_tape_cmds,
-	"number of commands to allocate for tape devices (default: 6)");
-static int cciss_simple_mode;
-module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_simple_mode,
-	"Use 'simple mode' rather than 'performant mode'");
-
-static int cciss_allow_hpsa;
-module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_allow_hpsa,
-	"Prevent cciss driver from accessing hardware known to be "
-	" supported by the hpsa driver");
-
-static DEFINE_MUTEX(cciss_mutex);
-static struct proc_dir_entry *proc_cciss;
-
-#include "cciss_cmd.h"
-#include "cciss.h"
-#include <linux/cciss_ioctl.h>
-
-/* define the PCI info for the cards we can control */
-static const struct pci_device_id cciss_pci_device_id[] = {
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,  0x0E11, 0x4070},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
-	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSA,     0x103C, 0x3225},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3235},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3211},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3212},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3213},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3214},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
-	{0,}
-};
-
-MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
-
-/*  board_id = Subsystem Device ID & Vendor ID
- *  product = Marketing Name for the board
- *  access = Address of the struct of function pointers
- */
-static struct board_type products[] = {
-	{0x40700E11, "Smart Array 5300", &SA5_access},
-	{0x40800E11, "Smart Array 5i", &SA5B_access},
-	{0x40820E11, "Smart Array 532", &SA5B_access},
-	{0x40830E11, "Smart Array 5312", &SA5B_access},
-	{0x409A0E11, "Smart Array 641", &SA5_access},
-	{0x409B0E11, "Smart Array 642", &SA5_access},
-	{0x409C0E11, "Smart Array 6400", &SA5_access},
-	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
-	{0x40910E11, "Smart Array 6i", &SA5_access},
-	{0x3225103C, "Smart Array P600", &SA5_access},
-	{0x3223103C, "Smart Array P800", &SA5_access},
-	{0x3234103C, "Smart Array P400", &SA5_access},
-	{0x3235103C, "Smart Array P400i", &SA5_access},
-	{0x3211103C, "Smart Array E200i", &SA5_access},
-	{0x3212103C, "Smart Array E200", &SA5_access},
-	{0x3213103C, "Smart Array E200i", &SA5_access},
-	{0x3214103C, "Smart Array E200i", &SA5_access},
-	{0x3215103C, "Smart Array E200i", &SA5_access},
-	{0x3237103C, "Smart Array E500", &SA5_access},
-	{0x323D103C, "Smart Array P700m", &SA5_access},
-};
-
-/* How long to wait (in milliseconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 30000
-#define MAX_IOCTL_CONFIG_WAIT 1000
-
-/*define how many times we will try a command because of bus resets */
-#define MAX_CMD_RETRIES 3
-
-#define MAX_CTLR	32
-
-/* Originally cciss driver only supports 8 major numbers */
-#define MAX_CTLR_ORIG 	8
-
-static ctlr_info_t *hba[MAX_CTLR];
-
-static struct task_struct *cciss_scan_thread;
-static DEFINE_MUTEX(scan_mutex);
-static LIST_HEAD(scan_q);
-
-static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intx(int irq, void *dev_id);
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
-static int cciss_open(struct block_device *bdev, fmode_t mode);
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
-		       unsigned int cmd, unsigned long arg);
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static int cciss_revalidate(struct gendisk *disk);
-static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
-static int deregister_disk(ctlr_info_t *h, int drv_index,
-			   int clear_all, int via_ioctl);
-
-static void cciss_read_capacity(ctlr_info_t *h, int logvol,
-			sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
-			sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
-			sector_t total_size,
-			unsigned int block_size, InquiryData_struct *inq_buff,
-				   drive_info_struct *drv);
-static void cciss_interrupt_mode(ctlr_info_t *);
-static int cciss_enter_simple_mode(struct ctlr_info *h);
-static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
-			__u8 page_code, unsigned char scsi3addr[],
-			int cmd_type);
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
-	int attempt_retry);
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
-
-static int add_to_scan_list(struct ctlr_info *h);
-static int scan_thread(void *data);
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
-static void cciss_hba_release(struct device *dev);
-static void cciss_device_release(struct device *dev);
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
-static inline u32 next_command(ctlr_info_t *h);
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
-				u32 *cfg_base_addr, u64 *cfg_base_addr_index,
-				u64 *cfg_offset);
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
-				     unsigned long *memory_bar);
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);
-
-/* performant mode helper functions */
-static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,
-				int *bucket_map);
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
-
-#ifdef CONFIG_PROC_FS
-static void cciss_procinit(ctlr_info_t *h);
-#else
-static void cciss_procinit(ctlr_info_t *h)
-{
-}
-#endif				/* CONFIG_PROC_FS */
-
-#ifdef CONFIG_COMPAT
-static int cciss_compat_ioctl(struct block_device *, fmode_t,
-			      unsigned, unsigned long);
-#endif
-
-static const struct block_device_operations cciss_fops = {
-	.owner = THIS_MODULE,
-	.open = cciss_unlocked_open,
-	.release = cciss_release,
-	.ioctl = cciss_ioctl,
-	.getgeo = cciss_getgeo,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = cciss_compat_ioctl,
-#endif
-	.revalidate_disk = cciss_revalidate,
-};
-
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
-{
-	if (likely(h->transMethod & CFGTBL_Trans_Performant))
-		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
-
-/*
- * Enqueuing and dequeuing functions for cmdlists.
- */
-static inline void addQ(struct list_head *list, CommandList_struct *c)
-{
-	list_add_tail(&c->list, list);
-}
-
-static inline void removeQ(CommandList_struct *c)
-{
-	/*
-	 * After kexec/dump some commands might still
-	 * be in flight, which the firmware will try
-	 * to complete. Resetting the firmware doesn't work
-	 * with old fw revisions, so we have to mark
-	 * them off as 'stale' to prevent the driver from
-	 * falling over.
-	 */
-	if (WARN_ON(list_empty(&c->list))) {
-		c->cmd_type = CMD_MSG_STALE;
-		return;
-	}
-
-	list_del_init(&c->list);
-}
-
-static void enqueue_cmd_and_start_io(ctlr_info_t *h,
-	CommandList_struct *c)
-{
-	unsigned long flags;
-	set_performant_mode(h, c);
-	spin_lock_irqsave(&h->lock, flags);
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	if (h->Qdepth > h->maxQsinceinit)
-		h->maxQsinceinit = h->Qdepth;
-	start_io(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
-	int nr_cmds)
-{
-	int i;
-
-	if (!cmd_sg_list)
-		return;
-	for (i = 0; i < nr_cmds; i++) {
-		kfree(cmd_sg_list[i]);
-		cmd_sg_list[i] = NULL;
-	}
-	kfree(cmd_sg_list);
-}
-
-static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
-	ctlr_info_t *h, int chainsize, int nr_cmds)
-{
-	int j;
-	SGDescriptor_struct **cmd_sg_list;
-
-	if (chainsize <= 0)
-		return NULL;
-
-	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
-	if (!cmd_sg_list)
-		return NULL;
-
-	/* Build up chain blocks for each command */
-	for (j = 0; j < nr_cmds; j++) {
-		/* Need a block of chainsized s/g elements. */
-		cmd_sg_list[j] = kmalloc((chainsize *
-			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
-		if (!cmd_sg_list[j]) {
-			dev_err(&h->pdev->dev, "Cannot get memory "
-				"for s/g chains.\n");
-			goto clean;
-		}
-	}
-	return cmd_sg_list;
-clean:
-	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
-	return NULL;
-}
-
-static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
-{
-	SGDescriptor_struct *chain_sg;
-	u64bit temp64;
-
-	if (c->Header.SGTotal <= h->max_cmd_sgentries)
-		return;
-
-	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
-	temp64.val32.lower = chain_sg->Addr.lower;
-	temp64.val32.upper = chain_sg->Addr.upper;
-	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
-}
-
-static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
-	SGDescriptor_struct *chain_block, int len)
-{
-	SGDescriptor_struct *chain_sg;
-	u64bit temp64;
-
-	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
-	chain_sg->Ext = CCISS_SG_CHAIN;
-	chain_sg->Len = len;
-	temp64.val = pci_map_single(h->pdev, chain_block, len,
-				PCI_DMA_TODEVICE);
-	if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
-		dev_warn(&h->pdev->dev,
-			"%s: error mapping chain block for DMA\n",
-			__func__);
-		return -1;
-	}
-	chain_sg->Addr.lower = temp64.val32.lower;
-	chain_sg->Addr.upper = temp64.val32.upper;
-
-	return 0;
-}
-
-#include "cciss_scsi.c"		/* For SCSI tape support */
-
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
-	"UNKNOWN"
-};
-#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)
-
-#ifdef CONFIG_PROC_FS
-
-/*
- * Report information about this controller.
- */
-#define ENG_GIG 1000000000
-#define ENG_GIG_FACTOR (ENG_GIG/512)
-#define ENGAGE_SCSI	"engage scsi"
-
-static void cciss_seq_show_header(struct seq_file *seq)
-{
-	ctlr_info_t *h = seq->private;
-
-	seq_printf(seq, "%s: HP %s Controller\n"
-		"Board ID: 0x%08lx\n"
-		"Firmware Version: %c%c%c%c\n"
-		"IRQ: %d\n"
-		"Logical drives: %d\n"
-		"Current Q depth: %d\n"
-		"Current # commands on controller: %d\n"
-		"Max Q depth since init: %d\n"
-		"Max # commands on controller since init: %d\n"
-		"Max SG entries since init: %d\n",
-		h->devname,
-		h->product_name,
-		(unsigned long)h->board_id,
-		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
-		h->num_luns,
-		h->Qdepth, h->commands_outstanding,
-		h->maxQsinceinit, h->max_outstanding, h->maxSG);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
-	cciss_seq_tape_report(seq, h);
-#endif /* CONFIG_CISS_SCSI_TAPE */
-}
-
-static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	ctlr_info_t *h = seq->private;
-	unsigned long flags;
-
-	/* prevent displaying bogus info during configuration
-	 * or deconfiguration of a logical volume
-	 */
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return ERR_PTR(-EBUSY);
-	}
-	h->busy_configuring = 1;
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (*pos == 0)
-		cciss_seq_show_header(seq);
-
-	return pos;
-}
-
-static int cciss_seq_show(struct seq_file *seq, void *v)
-{
-	sector_t vol_sz, vol_sz_frac;
-	ctlr_info_t *h = seq->private;
-	unsigned ctlr = h->ctlr;
-	loff_t *pos = v;
-	drive_info_struct *drv = h->drv[*pos];
-
-	if (*pos > h->highest_lun)
-		return 0;
-
-	if (drv == NULL) /* it's possible for h->drv[] to have holes. */
-		return 0;
-
-	if (drv->heads == 0)
-		return 0;
-
-	vol_sz = drv->nr_blocks;
-	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
-	vol_sz_frac *= 100;
-	sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-
-	if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
-		drv->raid_level = RAID_UNKNOWN;
-	seq_printf(seq, "cciss/c%dd%d:"
-			"\t%4u.%02uGB\tRAID %s\n",
-			ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
-			raid_label[drv->raid_level]);
-	return 0;
-}
-
-static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	ctlr_info_t *h = seq->private;
-
-	if (*pos > h->highest_lun)
-		return NULL;
-	*pos += 1;
-
-	return pos;
-}
-
-static void cciss_seq_stop(struct seq_file *seq, void *v)
-{
-	ctlr_info_t *h = seq->private;
-
-	/* Only reset h->busy_configuring if we succeeded in setting
-	 * it during cciss_seq_start. */
-	if (v == ERR_PTR(-EBUSY))
-		return;
-
-	h->busy_configuring = 0;
-}
-
-static const struct seq_operations cciss_seq_ops = {
-	.start = cciss_seq_start,
-	.show  = cciss_seq_show,
-	.next  = cciss_seq_next,
-	.stop  = cciss_seq_stop,
-};
-
-static int cciss_seq_open(struct inode *inode, struct file *file)
-{
-	int ret = seq_open(file, &cciss_seq_ops);
-	struct seq_file *seq = file->private_data;
-
-	if (!ret)
-		seq->private = PDE_DATA(inode);
-
-	return ret;
-}
-
-static ssize_t
-cciss_proc_write(struct file *file, const char __user *buf,
-		 size_t length, loff_t *ppos)
-{
-	int err;
-	char *buffer;
-
-#ifndef CONFIG_CISS_SCSI_TAPE
-	return -EINVAL;
-#endif
-
-	if (!buf || length > PAGE_SIZE - 1)
-		return -EINVAL;
-
-	buffer = memdup_user_nul(buf, length);
-	if (IS_ERR(buffer))
-		return PTR_ERR(buffer);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
-	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
-		struct seq_file *seq = file->private_data;
-		ctlr_info_t *h = seq->private;
-
-		err = cciss_engage_scsi(h);
-		if (err == 0)
-			err = length;
-	} else
-#endif /* CONFIG_CISS_SCSI_TAPE */
-		err = -EINVAL;
-	/* might be nice to have "disengage" too, but it's not
-	   safely possible. (only 1 module use count, lock issues.) */
-
-	kfree(buffer);
-	return err;
-}
-
-static const struct file_operations cciss_proc_fops = {
-	.owner	 = THIS_MODULE,
-	.open    = cciss_seq_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release,
-	.write	 = cciss_proc_write,
-};
-
-static void cciss_procinit(ctlr_info_t *h)
-{
-	struct proc_dir_entry *pde;
-
-	if (proc_cciss == NULL)
-		proc_cciss = proc_mkdir("driver/cciss", NULL);
-	if (!proc_cciss)
-		return;
-	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
-					S_IROTH, proc_cciss,
-					&cciss_proc_fops, h);
-}
-#endif				/* CONFIG_PROC_FS */
-
-#define MAX_PRODUCT_NAME_LEN 19
-
-#define to_hba(n) container_of(n, struct ctlr_info, dev)
-#define to_drv(n) container_of(n, drive_info_struct, dev)
-
-/* List of controllers which cannot be hard reset on kexec with reset_devices */
-static u32 unresettable_controller[] = {
-	0x3223103C, /* Smart Array P800 */
-	0x3234103C, /* Smart Array P400 */
-	0x3235103C, /* Smart Array P400i */
-	0x3211103C, /* Smart Array E200i */
-	0x3212103C, /* Smart Array E200 */
-	0x3213103C, /* Smart Array E200i */
-	0x3214103C, /* Smart Array E200i */
-	0x3215103C, /* Smart Array E200i */
-	0x3237103C, /* Smart Array E500 */
-	0x323D103C, /* Smart Array P700m */
-	0x40800E11, /* Smart Array 5i */
-	0x409C0E11, /* Smart Array 6400 */
-	0x409D0E11, /* Smart Array 6400 EM */
-	0x40700E11, /* Smart Array 5300 */
-	0x40820E11, /* Smart Array 532 */
-	0x40830E11, /* Smart Array 5312 */
-	0x409A0E11, /* Smart Array 641 */
-	0x409B0E11, /* Smart Array 642 */
-	0x40910E11, /* Smart Array 6i */
-};
-
-/* List of controllers which cannot even be soft reset */
-static u32 soft_unresettable_controller[] = {
-	0x40800E11, /* Smart Array 5i */
-	0x40700E11, /* Smart Array 5300 */
-	0x40820E11, /* Smart Array 532 */
-	0x40830E11, /* Smart Array 5312 */
-	0x409A0E11, /* Smart Array 641 */
-	0x409B0E11, /* Smart Array 642 */
-	0x40910E11, /* Smart Array 6i */
-	/* Exclude 640x boards.  These are two pci devices in one slot
-	 * which share a battery backed cache module.  One controls the
-	 * cache, the other accesses the cache through the one that controls
-	 * it.  If we reset the one controlling the cache, the other will
-	 * likely not be happy.  Just forbid resetting this conjoined mess.
-	 */
-	0x409C0E11, /* Smart Array 6400 */
-	0x409D0E11, /* Smart Array 6400 EM */
-};
-
-static int ctlr_is_hard_resettable(u32 board_id)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
-		if (unresettable_controller[i] == board_id)
-			return 0;
-	return 1;
-}
-
-static int ctlr_is_soft_resettable(u32 board_id)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
-		if (soft_unresettable_controller[i] == board_id)
-			return 0;
-	return 1;
-}
-
-static int ctlr_is_resettable(u32 board_id)
-{
-	return ctlr_is_hard_resettable(board_id) ||
-		ctlr_is_soft_resettable(board_id);
-}
-
-static ssize_t host_show_resettable(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct ctlr_info *h = to_hba(dev);
-
-	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
-}
-static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
-
-static ssize_t host_store_rescan(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct ctlr_info *h = to_hba(dev);
-
-	add_to_scan_list(h);
-	wake_up_process(cciss_scan_thread);
-	wait_for_completion_interruptible(&h->scan_wait);
-
-	return count;
-}
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-
-static ssize_t host_show_transport_mode(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct ctlr_info *h = to_hba(dev);
-
-	return snprintf(buf, 20, "%s\n",
-		h->transMethod & CFGTBL_Trans_Performant ?
-			"performant" : "simple");
-}
-static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
-
-static ssize_t dev_show_unique_id(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	__u8 sn[16];
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring)
-		ret = -EBUSY;
-	else
-		memcpy(sn, drv->serial_no, sizeof(sn));
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (ret)
-		return ret;
-	else
-		return snprintf(buf, 16 * 2 + 2,
-				"%02X%02X%02X%02X%02X%02X%02X%02X"
-				"%02X%02X%02X%02X%02X%02X%02X%02X\n",
-				sn[0], sn[1], sn[2], sn[3],
-				sn[4], sn[5], sn[6], sn[7],
-				sn[8], sn[9], sn[10], sn[11],
-				sn[12], sn[13], sn[14], sn[15]);
-}
-static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
-
-static ssize_t dev_show_vendor(struct device *dev,
-			       struct device_attribute *attr,
-			       char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	char vendor[VENDOR_LEN + 1];
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring)
-		ret = -EBUSY;
-	else
-		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (ret)
-		return ret;
-	else
-		return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
-}
-static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
-
-static ssize_t dev_show_model(struct device *dev,
-			      struct device_attribute *attr,
-			      char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	char model[MODEL_LEN + 1];
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring)
-		ret = -EBUSY;
-	else
-		memcpy(model, drv->model, MODEL_LEN + 1);
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (ret)
-		return ret;
-	else
-		return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
-}
-static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
-
-static ssize_t dev_show_rev(struct device *dev,
-			    struct device_attribute *attr,
-			    char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	char rev[REV_LEN + 1];
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring)
-		ret = -EBUSY;
-	else
-		memcpy(rev, drv->rev, REV_LEN + 1);
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (ret)
-		return ret;
-	else
-		return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
-}
-static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
-
-static ssize_t cciss_show_lunid(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	unsigned long flags;
-	unsigned char lunid[8];
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -EBUSY;
-	}
-	if (!drv->heads) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -ENOTTY;
-	}
-	memcpy(lunid, drv->LunID, sizeof(lunid));
-	spin_unlock_irqrestore(&h->lock, flags);
-	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		lunid[0], lunid[1], lunid[2], lunid[3],
-		lunid[4], lunid[5], lunid[6], lunid[7]);
-}
-static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
-
-static ssize_t cciss_show_raid_level(struct device *dev,
-				     struct device_attribute *attr, char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	int raid;
-	unsigned long flags;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -EBUSY;
-	}
-	raid = drv->raid_level;
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (raid < 0 || raid > RAID_UNKNOWN)
-		raid = RAID_UNKNOWN;
-
-	return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
-			raid_label[raid]);
-}
-static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
-
-static ssize_t cciss_show_usage_count(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	drive_info_struct *drv = to_drv(dev);
-	struct ctlr_info *h = to_hba(drv->dev.parent);
-	unsigned long flags;
-	int count;
-
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -EBUSY;
-	}
-	count = drv->usage_count;
-	spin_unlock_irqrestore(&h->lock, flags);
-	return snprintf(buf, 20, "%d\n", count);
-}
-static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
-
-static struct attribute *cciss_host_attrs[] = {
-	&dev_attr_rescan.attr,
-	&dev_attr_resettable.attr,
-	&dev_attr_transport_mode.attr,
-	NULL
-};
-
-static struct attribute_group cciss_host_attr_group = {
-	.attrs = cciss_host_attrs,
-};
-
-static const struct attribute_group *cciss_host_attr_groups[] = {
-	&cciss_host_attr_group,
-	NULL
-};
-
-static struct device_type cciss_host_type = {
-	.name		= "cciss_host",
-	.groups		= cciss_host_attr_groups,
-	.release	= cciss_hba_release,
-};
-
-static struct attribute *cciss_dev_attrs[] = {
-	&dev_attr_unique_id.attr,
-	&dev_attr_model.attr,
-	&dev_attr_vendor.attr,
-	&dev_attr_rev.attr,
-	&dev_attr_lunid.attr,
-	&dev_attr_raid_level.attr,
-	&dev_attr_usage_count.attr,
-	NULL
-};
-
-static struct attribute_group cciss_dev_attr_group = {
-	.attrs = cciss_dev_attrs,
-};
-
-static const struct attribute_group *cciss_dev_attr_groups[] = {
-	&cciss_dev_attr_group,
-	NULL
-};
-
-static struct device_type cciss_dev_type = {
-	.name		= "cciss_device",
-	.groups		= cciss_dev_attr_groups,
-	.release	= cciss_device_release,
-};
-
-static struct bus_type cciss_bus_type = {
-	.name		= "cciss",
-};
-
-/*
- * cciss_hba_release is called when the reference count
- * of h->dev goes to zero.
- */
-static void cciss_hba_release(struct device *dev)
-{
-	/*
-	 * nothing to do, but need this to avoid a warning
-	 * about not having a release handler from lib/kref.c.
-	 */
-}
-
-/*
- * Initialize sysfs entry for each controller.  This sets up and registers
- * the 'cciss#' directory for each individual controller under
- * /sys/bus/pci/devices/<dev>/.
- */
-static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
-{
-	device_initialize(&h->dev);
-	h->dev.type = &cciss_host_type;
-	h->dev.bus = &cciss_bus_type;
-	dev_set_name(&h->dev, "%s", h->devname);
-	h->dev.parent = &h->pdev->dev;
-
-	return device_add(&h->dev);
-}
-
-/*
- * Remove sysfs entries for an hba.
- */
-static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
-{
-	device_del(&h->dev);
-	put_device(&h->dev); /* final put. */
-}
-
-/* cciss_device_release is called when the reference count
- * of h->drv[x]dev goes to zero.
- */
-static void cciss_device_release(struct device *dev)
-{
-	drive_info_struct *drv = to_drv(dev);
-	kfree(drv);
-}
-
-/*
- * Initialize sysfs for each logical drive.  This sets up and registers
- * the 'c#d#' directory for each individual logical drive under
- * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
- * /sys/block/cciss!c#d# to this entry.
- */
-static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
-				       int drv_index)
-{
-	struct device *dev;
-
-	if (h->drv[drv_index]->device_initialized)
-		return 0;
-
-	dev = &h->drv[drv_index]->dev;
-	device_initialize(dev);
-	dev->type = &cciss_dev_type;
-	dev->bus = &cciss_bus_type;
-	dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
-	dev->parent = &h->dev;
-	h->drv[drv_index]->device_initialized = 1;
-	return device_add(dev);
-}
-
-/*
- * Remove sysfs entries for a logical drive.
- */
-static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
-	int ctlr_exiting)
-{
-	struct device *dev = &h->drv[drv_index]->dev;
-
-	/* special case for c*d0, we only destroy it on controller exit */
-	if (drv_index == 0 && !ctlr_exiting)
-		return;
-
-	device_del(dev);
-	put_device(dev); /* the "final" put. */
-	h->drv[drv_index] = NULL;
-}
-
-/*
- * For operations that cannot sleep, a command block is allocated at init,
- * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use.
- */
-static CommandList_struct *cmd_alloc(ctlr_info_t *h)
-{
-	CommandList_struct *c;
-	int i;
-	u64bit temp64;
-	dma_addr_t cmd_dma_handle, err_dma_handle;
-
-	do {
-		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-		if (i == h->nr_cmds)
-			return NULL;
-	} while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
-	c = h->cmd_pool + i;
-	memset(c, 0, sizeof(CommandList_struct));
-	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
-	c->err_info = h->errinfo_pool + i;
-	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-	err_dma_handle = h->errinfo_pool_dhandle
-	    + i * sizeof(ErrorInfo_struct);
-	h->nr_allocs++;
-
-	c->cmdindex = i;
-
-	INIT_LIST_HEAD(&c->list);
-	c->busaddr = (__u32) cmd_dma_handle;
-	temp64.val = (__u64) err_dma_handle;
-	c->ErrDesc.Addr.lower = temp64.val32.lower;
-	c->ErrDesc.Addr.upper = temp64.val32.upper;
-	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
-	c->ctlr = h->ctlr;
-	return c;
-}
-
-/* allocate a command using pci_alloc_consistent, used for ioctls,
- * etc., not for the main i/o path.
- */
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
-{
-	CommandList_struct *c;
-	u64bit temp64;
-	dma_addr_t cmd_dma_handle, err_dma_handle;
-
-	c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct),
-				  &cmd_dma_handle);
-	if (c == NULL)
-		return NULL;
-
-	c->cmdindex = -1;
-
-	c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
-					    &err_dma_handle);
-
-	if (c->err_info == NULL) {
-		pci_free_consistent(h->pdev,
-			sizeof(CommandList_struct), c, cmd_dma_handle);
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&c->list);
-	c->busaddr = (__u32) cmd_dma_handle;
-	temp64.val = (__u64) err_dma_handle;
-	c->ErrDesc.Addr.lower = temp64.val32.lower;
-	c->ErrDesc.Addr.upper = temp64.val32.upper;
-	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
-	c->ctlr = h->ctlr;
-	return c;
-}
-
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
-	int i;
-
-	i = c - h->cmd_pool;
-	clear_bit(i, h->cmd_pool_bits);
-	h->nr_frees++;
-}
-
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
-{
-	u64bit temp64;
-
-	temp64.val32.lower = c->ErrDesc.Addr.lower;
-	temp64.val32.upper = c->ErrDesc.Addr.upper;
-	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
-			    c->err_info, (dma_addr_t) temp64.val);
-	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
-		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
-}
-
-static inline ctlr_info_t *get_host(struct gendisk *disk)
-{
-	return disk->queue->queuedata;
-}
-
-static inline drive_info_struct *get_drv(struct gendisk *disk)
-{
-	return disk->private_data;
-}
-
-/*
- * Open.  Make sure the device is really there.
- */
-static int cciss_open(struct block_device *bdev, fmode_t mode)
-{
-	ctlr_info_t *h = get_host(bdev->bd_disk);
-	drive_info_struct *drv = get_drv(bdev->bd_disk);
-
-	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
-	if (drv->busy_configuring)
-		return -EBUSY;
-	/*
-	 * Root is allowed to open raw volume zero even if it's not configured
-	 * so array config can still work. Root is also allowed to open any
-	 * volume that has a LUN ID, so it can issue IOCTL to reread the
-	 * disk information.  I don't think I really like this
-	 * but I'm already using way to many device nodes to claim another one
-	 * for "raw controller".
-	 */
-	if (drv->heads == 0) {
-		if (MINOR(bdev->bd_dev) != 0) {	/* not node 0? */
-			/* if not node 0 make sure it is a partition = 0 */
-			if (MINOR(bdev->bd_dev) & 0x0f) {
-				return -ENXIO;
-				/* if it is, make sure we have a LUN ID */
-			} else if (memcmp(drv->LunID, CTLR_LUNID,
-				sizeof(drv->LunID))) {
-				return -ENXIO;
-			}
-		}
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-	}
-	drv->usage_count++;
-	h->usage_count++;
-	return 0;
-}
-
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
-	int ret;
-
-	mutex_lock(&cciss_mutex);
-	ret = cciss_open(bdev, mode);
-	mutex_unlock(&cciss_mutex);
-
-	return ret;
-}
-
-/*
- * Close.  Sync first.
- */
-static void cciss_release(struct gendisk *disk, fmode_t mode)
-{
-	ctlr_info_t *h;
-	drive_info_struct *drv;
-
-	mutex_lock(&cciss_mutex);
-	h = get_host(disk);
-	drv = get_drv(disk);
-	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
-	drv->usage_count--;
-	h->usage_count--;
-	mutex_unlock(&cciss_mutex);
-}
-
-#ifdef CONFIG_COMPAT
-
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
-				  unsigned cmd, unsigned long arg);
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
-				      unsigned cmd, unsigned long arg);
-
-static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
-			      unsigned cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case CCISS_GETPCIINFO:
-	case CCISS_GETINTINFO:
-	case CCISS_SETINTINFO:
-	case CCISS_GETNODENAME:
-	case CCISS_SETNODENAME:
-	case CCISS_GETHEARTBEAT:
-	case CCISS_GETBUSTYPES:
-	case CCISS_GETFIRMVER:
-	case CCISS_GETDRIVVER:
-	case CCISS_REVALIDVOLS:
-	case CCISS_DEREGDISK:
-	case CCISS_REGNEWDISK:
-	case CCISS_REGNEWD:
-	case CCISS_RESCANDISK:
-	case CCISS_GETLUNINFO:
-		return cciss_ioctl(bdev, mode, cmd, arg);
-
-	case CCISS_PASSTHRU32:
-		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
-	case CCISS_BIG_PASSTHRU32:
-		return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
-
-	default:
-		return -ENOIOCTLCMD;
-	}
-}
-
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
-				  unsigned cmd, unsigned long arg)
-{
-	IOCTL32_Command_struct __user *arg32 =
-	    (IOCTL32_Command_struct __user *) arg;
-	IOCTL_Command_struct arg64;
-	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
-	int err;
-	u32 cp;
-
-	memset(&arg64, 0, sizeof(arg64));
-	err = 0;
-	err |=
-	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-			   sizeof(arg64.LUN_info));
-	err |=
-	    copy_from_user(&arg64.Request, &arg32->Request,
-			   sizeof(arg64.Request));
-	err |=
-	    copy_from_user(&arg64.error_info, &arg32->error_info,
-			   sizeof(arg64.error_info));
-	err |= get_user(arg64.buf_size, &arg32->buf_size);
-	err |= get_user(cp, &arg32->buf);
-	arg64.buf = compat_ptr(cp);
-	err |= copy_to_user(p, &arg64, sizeof(arg64));
-
-	if (err)
-		return -EFAULT;
-
-	err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
-	if (err)
-		return err;
-	err |=
-	    copy_in_user(&arg32->error_info, &p->error_info,
-			 sizeof(arg32->error_info));
-	if (err)
-		return -EFAULT;
-	return err;
-}
-
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
-				      unsigned cmd, unsigned long arg)
-{
-	BIG_IOCTL32_Command_struct __user *arg32 =
-	    (BIG_IOCTL32_Command_struct __user *) arg;
-	BIG_IOCTL_Command_struct arg64;
-	BIG_IOCTL_Command_struct __user *p =
-	    compat_alloc_user_space(sizeof(arg64));
-	int err;
-	u32 cp;
-
-	memset(&arg64, 0, sizeof(arg64));
-	err = 0;
-	err |=
-	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-			   sizeof(arg64.LUN_info));
-	err |=
-	    copy_from_user(&arg64.Request, &arg32->Request,
-			   sizeof(arg64.Request));
-	err |=
-	    copy_from_user(&arg64.error_info, &arg32->error_info,
-			   sizeof(arg64.error_info));
-	err |= get_user(arg64.buf_size, &arg32->buf_size);
-	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
-	err |= get_user(cp, &arg32->buf);
-	arg64.buf = compat_ptr(cp);
-	err |= copy_to_user(p, &arg64, sizeof(arg64));
-
-	if (err)
-		return -EFAULT;
-
-	err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
-	if (err)
-		return err;
-	err |=
-	    copy_in_user(&arg32->error_info, &p->error_info,
-			 sizeof(arg32->error_info));
-	if (err)
-		return -EFAULT;
-	return err;
-}
-#endif
-
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-	drive_info_struct *drv = get_drv(bdev->bd_disk);
-
-	if (!drv->cylinders)
-		return -ENXIO;
-
-	geo->heads = drv->heads;
-	geo->sectors = drv->sectors;
-	geo->cylinders = drv->cylinders;
-	return 0;
-}
-
-static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
-	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
-			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
-		(void)check_for_unit_attention(h, c);
-}
-
-static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
-{
-	cciss_pci_info_struct pciinfo;
-
-	if (!argp)
-		return -EINVAL;
-	pciinfo.domain = pci_domain_nr(h->pdev->bus);
-	pciinfo.bus = h->pdev->bus->number;
-	pciinfo.dev_fn = h->pdev->devfn;
-	pciinfo.board_id = h->board_id;
-	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
-{
-	cciss_coalint_struct intinfo;
-	unsigned long flags;
-
-	if (!argp)
-		return -EINVAL;
-	spin_lock_irqsave(&h->lock, flags);
-	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
-	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (copy_to_user
-	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
-{
-	cciss_coalint_struct intinfo;
-	unsigned long flags;
-	int i;
-
-	if (!argp)
-		return -EINVAL;
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
-		return -EFAULT;
-	if ((intinfo.delay == 0) && (intinfo.count == 0))
-		return -EINVAL;
-	spin_lock_irqsave(&h->lock, flags);
-	/* Update the field, and then ring the doorbell */
-	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
-	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-
-	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
-			break;
-		udelay(1000); /* delay and try again */
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (i >= MAX_IOCTL_CONFIG_WAIT)
-		return -EAGAIN;
-	return 0;
-}
-
-static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
-{
-	NodeName_type NodeName;
-	unsigned long flags;
-	int i;
-
-	if (!argp)
-		return -EINVAL;
-	spin_lock_irqsave(&h->lock, flags);
-	for (i = 0; i < 16; i++)
-		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
-{
-	NodeName_type NodeName;
-	unsigned long flags;
-	int i;
-
-	if (!argp)
-		return -EINVAL;
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
-		return -EFAULT;
-	spin_lock_irqsave(&h->lock, flags);
-	/* Update the field, and then ring the doorbell */
-	for (i = 0; i < 16; i++)
-		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
-			break;
-		udelay(1000); /* delay and try again */
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (i >= MAX_IOCTL_CONFIG_WAIT)
-		return -EAGAIN;
-	return 0;
-}
-
-static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
-{
-	Heartbeat_type heartbeat;
-	unsigned long flags;
-
-	if (!argp)
-		return -EINVAL;
-	spin_lock_irqsave(&h->lock, flags);
-	heartbeat = readl(&h->cfgtable->HeartBeat);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
-{
-	BusTypes_type BusTypes;
-	unsigned long flags;
-
-	if (!argp)
-		return -EINVAL;
-	spin_lock_irqsave(&h->lock, flags);
-	BusTypes = readl(&h->cfgtable->BusTypes);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
-{
-	FirmwareVer_type firmware;
-
-	if (!argp)
-		return -EINVAL;
-	memcpy(firmware, h->firm_ver, 4);
-
-	if (copy_to_user
-	    (argp, firmware, sizeof(FirmwareVer_type)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
-{
-	DriverVer_type DriverVer = DRIVER_VERSION;
-
-	if (!argp)
-		return -EINVAL;
-	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_getluninfo(ctlr_info_t *h,
-	struct gendisk *disk, void __user *argp)
-{
-	LogvolInfo_struct luninfo;
-	drive_info_struct *drv = get_drv(disk);
-
-	if (!argp)
-		return -EINVAL;
-	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
-	luninfo.num_opens = drv->usage_count;
-	luninfo.num_parts = 0;
-	if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
-		return -EFAULT;
-	return 0;
-}
-
-static int cciss_passthru(ctlr_info_t *h, void __user *argp)
-{
-	IOCTL_Command_struct iocommand;
-	CommandList_struct *c;
-	char *buff = NULL;
-	u64bit temp64;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	if (!argp)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
-	if (copy_from_user
-	    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
-		return -EFAULT;
-	if ((iocommand.buf_size < 1) &&
-	    (iocommand.Request.Type.Direction != XFER_NONE)) {
-		return -EINVAL;
-	}
-	if (iocommand.buf_size > 0) {
-		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
-		if (buff == NULL)
-			return -EFAULT;
-	}
-	if (iocommand.Request.Type.Direction == XFER_WRITE) {
-		/* Copy the data into the buffer we created */
-		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
-			kfree(buff);
-			return -EFAULT;
-		}
-	} else {
-		memset(buff, 0, iocommand.buf_size);
-	}
-	c = cmd_special_alloc(h);
-	if (!c) {
-		kfree(buff);
-		return -ENOMEM;
-	}
-	/* Fill in the command type */
-	c->cmd_type = CMD_IOCTL_PEND;
-	/* Fill in Command Header */
-	c->Header.ReplyQueue = 0;   /* unused in simple mode */
-	if (iocommand.buf_size > 0) { /* buffer to fill */
-		c->Header.SGList = 1;
-		c->Header.SGTotal = 1;
-	} else { /* no buffers to fill */
-		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
-	}
-	c->Header.LUN = iocommand.LUN_info;
-	/* use the kernel address the cmd block for tag */
-	c->Header.Tag.lower = c->busaddr;
-
-	/* Fill in Request block */
-	c->Request = iocommand.Request;
-
-	/* Fill in the scatter gather information */
-	if (iocommand.buf_size > 0) {
-		temp64.val = pci_map_single(h->pdev, buff,
-			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
-		c->SG[0].Addr.lower = temp64.val32.lower;
-		c->SG[0].Addr.upper = temp64.val32.upper;
-		c->SG[0].Len = iocommand.buf_size;
-		c->SG[0].Ext = 0;  /* we are not chaining */
-	}
-	c->waiting = &wait;
-
-	enqueue_cmd_and_start_io(h, c);
-	wait_for_completion(&wait);
-
-	/* unlock the buffers from DMA */
-	temp64.val32.lower = c->SG[0].Addr.lower;
-	temp64.val32.upper = c->SG[0].Addr.upper;
-	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
-			 PCI_DMA_BIDIRECTIONAL);
-	check_ioctl_unit_attention(h, c);
-
-	/* Copy the error information out */
-	iocommand.error_info = *(c->err_info);
-	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
-		kfree(buff);
-		cmd_special_free(h, c);
-		return -EFAULT;
-	}
-
-	if (iocommand.Request.Type.Direction == XFER_READ) {
-		/* Copy the data out of the buffer we created */
-		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
-			kfree(buff);
-			cmd_special_free(h, c);
-			return -EFAULT;
-		}
-	}
-	kfree(buff);
-	cmd_special_free(h, c);
-	return 0;
-}
-
-static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
-{
-	BIG_IOCTL_Command_struct *ioc;
-	CommandList_struct *c;
-	unsigned char **buff = NULL;
-	int *buff_size = NULL;
-	u64bit temp64;
-	BYTE sg_used = 0;
-	int status = 0;
-	int i;
-	DECLARE_COMPLETION_ONSTACK(wait);
-	__u32 left;
-	__u32 sz;
-	BYTE __user *data_ptr;
-
-	if (!argp)
-		return -EINVAL;
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-	if (!ioc) {
-		status = -ENOMEM;
-		goto cleanup1;
-	}
-	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
-		status = -EFAULT;
-		goto cleanup1;
-	}
-	if ((ioc->buf_size < 1) &&
-	    (ioc->Request.Type.Direction != XFER_NONE)) {
-		status = -EINVAL;
-		goto cleanup1;
-	}
-	/* Check kmalloc limits  using all SGs */
-	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
-		status = -EINVAL;
-		goto cleanup1;
-	}
-	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
-		status = -EINVAL;
-		goto cleanup1;
-	}
-	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
-	if (!buff) {
-		status = -ENOMEM;
-		goto cleanup1;
-	}
-	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
-	if (!buff_size) {
-		status = -ENOMEM;
-		goto cleanup1;
-	}
-	left = ioc->buf_size;
-	data_ptr = ioc->buf;
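-	/* Split the user buffer into chunks of at most ioc->malloc_size
-	 * bytes so each kmalloc() stays within MAX_KMALLOC_SIZE; one
-	 * scatter-gather entry is used per chunk.
-	 */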
-	while (left) {
-		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
-		buff_size[sg_used] = sz;
-		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
-		if (buff[sg_used] == NULL) {
-			status = -ENOMEM;
-			goto cleanup1;
-		}
-		if (ioc->Request.Type.Direction == XFER_WRITE) {
-			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
-				status = -EFAULT;
-				goto cleanup1;
-			}
-		} else {
-			memset(buff[sg_used], 0, sz);
-		}
-		left -= sz;
-		data_ptr += sz;
-		sg_used++;
-	}
-	c = cmd_special_alloc(h);
-	if (!c) {
-		status = -ENOMEM;
-		goto cleanup1;
-	}
-	c->cmd_type = CMD_IOCTL_PEND;
-	c->Header.ReplyQueue = 0;
-	c->Header.SGList = sg_used;
-	c->Header.SGTotal = sg_used;
-	c->Header.LUN = ioc->LUN_info;
-	c->Header.Tag.lower = c->busaddr;
-
-	c->Request = ioc->Request;
-	for (i = 0; i < sg_used; i++) {
-		temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
-				    PCI_DMA_BIDIRECTIONAL);
-		c->SG[i].Addr.lower = temp64.val32.lower;
-		c->SG[i].Addr.upper = temp64.val32.upper;
-		c->SG[i].Len = buff_size[i];
-		c->SG[i].Ext = 0;	/* we are not chaining */
-	}
-	c->waiting = &wait;
-	enqueue_cmd_and_start_io(h, c);
-	wait_for_completion(&wait);
-	/* unlock the buffers from DMA */
-	for (i = 0; i < sg_used; i++) {
-		temp64.val32.lower = c->SG[i].Addr.lower;
-		temp64.val32.upper = c->SG[i].Addr.upper;
-		pci_unmap_single(h->pdev,
-			(dma_addr_t) temp64.val, buff_size[i],
-			PCI_DMA_BIDIRECTIONAL);
-	}
-	check_ioctl_unit_attention(h, c);
-	/* Copy the error information out */
-	ioc->error_info = *(c->err_info);
-	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
-		cmd_special_free(h, c);
-		status = -EFAULT;
-		goto cleanup1;
-	}
-	if (ioc->Request.Type.Direction == XFER_READ) {
-		/* Copy the data out of the buffer we created */
-		BYTE __user *ptr = ioc->buf;
-		for (i = 0; i < sg_used; i++) {
-			if (copy_to_user(ptr, buff[i], buff_size[i])) {
-				cmd_special_free(h, c);
-				status = -EFAULT;
-				goto cleanup1;
-			}
-			ptr += buff_size[i];
-		}
-	}
-	cmd_special_free(h, c);
-	status = 0;
-cleanup1:
-	if (buff) {
-		for (i = 0; i < sg_used; i++)
-			kfree(buff[i]);
-		kfree(buff);
-	}
-	kfree(buff_size);
-	kfree(ioc);
-	return status;
-}
-
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
-	unsigned int cmd, unsigned long arg)
-{
-	struct gendisk *disk = bdev->bd_disk;
-	ctlr_info_t *h = get_host(disk);
-	void __user *argp = (void __user *)arg;
-
-	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
-		cmd, arg);
-	switch (cmd) {
-	case CCISS_GETPCIINFO:
-		return cciss_getpciinfo(h, argp);
-	case CCISS_GETINTINFO:
-		return cciss_getintinfo(h, argp);
-	case CCISS_SETINTINFO:
-		return cciss_setintinfo(h, argp);
-	case CCISS_GETNODENAME:
-		return cciss_getnodename(h, argp);
-	case CCISS_SETNODENAME:
-		return cciss_setnodename(h, argp);
-	case CCISS_GETHEARTBEAT:
-		return cciss_getheartbeat(h, argp);
-	case CCISS_GETBUSTYPES:
-		return cciss_getbustypes(h, argp);
-	case CCISS_GETFIRMVER:
-		return cciss_getfirmver(h, argp);
-	case CCISS_GETDRIVVER:
-		return cciss_getdrivver(h, argp);
-	case CCISS_DEREGDISK:
-	case CCISS_REGNEWD:
-	case CCISS_REVALIDVOLS:
-		return rebuild_lun_table(h, 0, 1);
-	case CCISS_GETLUNINFO:
-		return cciss_getluninfo(h, disk, argp);
-	case CCISS_PASSTHRU:
-		return cciss_passthru(h, argp);
-	case CCISS_BIG_PASSTHRU:
-		return cciss_bigpassthru(h, argp);
-
-	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
-	/* very meaningful for cciss.  SG_IO is the main one people want. */
-
-	case SG_GET_VERSION_NUM:
-	case SG_SET_TIMEOUT:
-	case SG_GET_TIMEOUT:
-	case SG_GET_RESERVED_SIZE:
-	case SG_SET_RESERVED_SIZE:
-	case SG_EMULATED_HOST:
-	case SG_IO:
-	case SCSI_IOCTL_SEND_COMMAND:
-		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
-	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
-	/* they aren't a good fit for cciss, as CD-ROMs are */
-	/* not supported, and we don't have any bus/target/lun */
-	/* which we present to the kernel. */
-
-	case CDROM_SEND_PACKET:
-	case CDROMCLOSETRAY:
-	case CDROMEJECT:
-	case SCSI_IOCTL_GET_IDLUN:
-	case SCSI_IOCTL_GET_BUS_NUMBER:
-	default:
-		return -ENOTTY;
-	}
-}
-
-static void cciss_check_queues(ctlr_info_t *h)
-{
-	int start_queue = h->next_to_run;
-	int i;
-
-	/* check to see if we have maxed out the number of commands that can
-	 * be placed on the queue.  If so then exit.  We do this check here
-	 * in case the interrupt we serviced was from an ioctl and did not
-	 * free any new commands.
-	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
-		return;
-
-	/* We have room on the queue for more commands.  Now we need to queue
-	 * them up.  We will also keep track of the next queue to run so
-	 * that every queue gets a chance to be started first.
-	 */
-	for (i = 0; i < h->highest_lun + 1; i++) {
-		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
-		/* make sure the disk has been added and the drive is real
-		 * because this can be called from the middle of init_one.
-		 */
-		if (!h->drv[curr_queue])
-			continue;
-		if (!(h->drv[curr_queue]->queue) ||
-			!(h->drv[curr_queue]->heads))
-			continue;
-		blk_start_queue(h->gendisk[curr_queue]->queue);
-
-		/* check to see if we have maxed out the number of commands
-		 * that can be placed on the queue.
-		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
-			if (curr_queue == start_queue) {
-				h->next_to_run =
-				    (start_queue + 1) % (h->highest_lun + 1);
-				break;
-			} else {
-				h->next_to_run = curr_queue;
-				break;
-			}
-		}
-	}
-}
-
-static void cciss_softirq_done(struct request *rq)
-{
-	CommandList_struct *c = rq->completion_data;
-	ctlr_info_t *h = hba[c->ctlr];
-	SGDescriptor_struct *curr_sg = c->SG;
-	u64bit temp64;
-	unsigned long flags;
-	int i, ddir;
-	int sg_index = 0;
-
-	if (c->Request.Type.Direction == XFER_READ)
-		ddir = PCI_DMA_FROMDEVICE;
-	else
-		ddir = PCI_DMA_TODEVICE;
-
-	/* command did not need to be retried */
-	/* unmap the DMA mapping for all the scatter gather elements */
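-	/* An entry whose Ext field is CCISS_SG_CHAIN marks the point where
-	 * the SG list continues in a separate per-command chain block, so
-	 * switch to that block and keep unmapping from its start.
-	 */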
-	for (i = 0; i < c->Header.SGList; i++) {
-		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
-			cciss_unmap_sg_chain_block(h, c);
-			/* Point to the next block */
-			curr_sg = h->cmd_sg_list[c->cmdindex];
-			sg_index = 0;
-		}
-		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
-		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
-		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
-				ddir);
-		++sg_index;
-	}
-
-	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
-
-	/* set the residual count for pc requests */
-	if (blk_rq_is_passthrough(rq))
-		scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
-	blk_end_request_all(rq, scsi_req(rq)->result ?
-			BLK_STS_IOERR : BLK_STS_OK);
-
-	spin_lock_irqsave(&h->lock, flags);
-	cmd_free(h, c);
-	cciss_check_queues(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
-	unsigned char scsi3addr[], uint32_t log_unit)
-{
-	memcpy(scsi3addr, h->drv[log_unit]->LunID,
-		sizeof(h->drv[log_unit]->LunID));
-}
-
-/* This function gets the SCSI vendor, model, and revision of a logical drive
- * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
- * they cannot be read.
- */
-static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
-				   char *vendor, char *model, char *rev)
-{
-	int rc;
-	InquiryData_struct *inq_buf;
-	unsigned char scsi3addr[8];
-
-	*vendor = '\0';
-	*model = '\0';
-	*rev = '\0';
-
-	inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-	if (!inq_buf)
-		return;
-
-	log_unit_to_scsi3addr(h, scsi3addr, logvol);
-	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
-			scsi3addr, TYPE_CMD);
-	if (rc == IO_OK) {
-		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
-		vendor[VENDOR_LEN] = '\0';
-		memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
-		model[MODEL_LEN] = '\0';
-		memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
-		rev[REV_LEN] = '\0';
-	}
-
-	kfree(inq_buf);
-	return;
-}
-
-/* This function gets the serial number of a logical drive via
- * inquiry page 0x83.  Serial no. is 16 bytes.  If the serial
- * number cannot be had, for whatever reason, 16 bytes of 0xff
- * are returned instead.
- */
-static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
-				unsigned char *serial_no, int buflen)
-{
-#define PAGE_83_INQ_BYTES 64
-	int rc;
-	unsigned char *buf;
-	unsigned char scsi3addr[8];
-
-	if (buflen > 16)
-		buflen = 16;
-	memset(serial_no, 0xff, buflen);
-	buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
-	if (!buf)
-		return;
-	log_unit_to_scsi3addr(h, scsi3addr, logvol);
-	rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
-		PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
-	if (rc == IO_OK)
-		memcpy(serial_no, &buf[8], buflen);
-	kfree(buf);
-	return;
-}
-
-static void cciss_initialize_rq(struct request *rq)
-{
-	struct scsi_request *sreq = blk_mq_rq_to_pdu(rq);
-
-	scsi_req_init(sreq);
-}
-
-/*
- * cciss_add_disk sets up the block device queue for a logical drive
- */
-static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
-				int drv_index)
-{
-	disk->queue = blk_alloc_queue(GFP_KERNEL);
-	if (!disk->queue)
-		goto init_queue_failure;
-
-	disk->queue->cmd_size = sizeof(struct scsi_request);
-	disk->queue->request_fn = do_cciss_request;
-	disk->queue->initialize_rq_fn = cciss_initialize_rq;
-	disk->queue->queue_lock = &h->lock;
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
-	if (blk_init_allocated_queue(disk->queue) < 0)
-		goto cleanup_queue;
-
-	sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
-	disk->major = h->major;
-	disk->first_minor = drv_index << NWD_SHIFT;
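-	/* each logical drive spans 1 << NWD_SHIFT minor numbers: the whole
-	 * device plus its partitions */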
-	disk->fops = &cciss_fops;
-	if (cciss_create_ld_sysfs_entry(h, drv_index))
-		goto cleanup_queue;
-	disk->private_data = h->drv[drv_index];
-
-	/* Set up queue information */
-	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
-
-	/* This is a hardware imposed limit. */
-	blk_queue_max_segments(disk->queue, h->maxsgentries);
-
-	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
-
-	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
-
-	disk->queue->queuedata = h;
-
-	blk_queue_logical_block_size(disk->queue,
-				     h->drv[drv_index]->block_size);
-
-	/* Make sure all queue data is written out before */
-	/* setting h->drv[drv_index]->queue, as setting this */
-	/* allows the interrupt handler to start the queue */
-	wmb();
-	h->drv[drv_index]->queue = disk->queue;
-	device_add_disk(&h->drv[drv_index]->dev, disk);
-	return 0;
-
-cleanup_queue:
-	blk_cleanup_queue(disk->queue);
-	disk->queue = NULL;
-init_queue_failure:
-	return -1;
-}
-
-/* This function will check the usage_count of the drive to be updated/added.
- * If the usage_count is zero and it is a heretofore unknown drive, or,
- * the drive's capacity, geometry, or serial number has changed,
- * then the drive information will be updated and the disk will be
- * re-registered with the kernel.  If these conditions don't hold,
- * then it will be left alone for the next reboot.  The exception to this
- * is disk 0 which will always be left registered with the kernel since it
- * is also the controller node.  Any changes to disk 0 will show up on
- * the next reboot.
- */
-static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
-	int first_time, int via_ioctl)
-{
-	struct gendisk *disk;
-	InquiryData_struct *inq_buff = NULL;
-	unsigned int block_size;
-	sector_t total_size;
-	unsigned long flags = 0;
-	int ret = 0;
-	drive_info_struct *drvinfo;
-
-	/* Get information about the disk and modify the driver structure */
-	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-	drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
-	if (inq_buff == NULL || drvinfo == NULL)
-		goto mem_msg;
-
-	/* testing to see if 16-byte CDBs are already being used */
-	if (h->cciss_read == CCISS_READ_16) {
-		cciss_read_capacity_16(h, drv_index,
-			&total_size, &block_size);
-
-	} else {
-		cciss_read_capacity(h, drv_index, &total_size, &block_size);
-		/* if read_capacity returns all F's this volume is >2TB */
-		/* in size so we switch to 16-byte CDB's for all */
-		/* read/write ops */
-		if (total_size == 0xFFFFFFFFULL) {
-			cciss_read_capacity_16(h, drv_index,
-			&total_size, &block_size);
-			h->cciss_read = CCISS_READ_16;
-			h->cciss_write = CCISS_WRITE_16;
-		} else {
-			h->cciss_read = CCISS_READ_10;
-			h->cciss_write = CCISS_WRITE_10;
-		}
-	}
-
-	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
-			       inq_buff, drvinfo);
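-	/* total_size from READ CAPACITY is the address of the last block,
-	 * so the usable block count is total_size + 1 */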
-	drvinfo->block_size = block_size;
-	drvinfo->nr_blocks = total_size + 1;
-
-	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
-				drvinfo->model, drvinfo->rev);
-	cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
-			sizeof(drvinfo->serial_no));
-	/* Save the lunid in case we deregister the disk, below. */
-	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
-		sizeof(drvinfo->LunID));
-
-	/* Is it the same disk we already know, and nothing's changed? */
-	if (h->drv[drv_index]->raid_level != -1 &&
-		((memcmp(drvinfo->serial_no,
-				h->drv[drv_index]->serial_no, 16) == 0) &&
-		drvinfo->block_size == h->drv[drv_index]->block_size &&
-		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
-		drvinfo->heads == h->drv[drv_index]->heads &&
-		drvinfo->sectors == h->drv[drv_index]->sectors &&
-		drvinfo->cylinders == h->drv[drv_index]->cylinders))
-			/* The disk is unchanged, nothing to update */
-			goto freeret;
-
-	/* If we get here it's not the same disk, or something's changed,
-	 * so we need to deregister it, and re-register it, if it's not
-	 * in use.
-	 * If the disk already exists then deregister it before proceeding
-	 * (unless it's the first disk, for the controller node).
-	 */
-	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
-		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
-		spin_lock_irqsave(&h->lock, flags);
-		h->drv[drv_index]->busy_configuring = 1;
-		spin_unlock_irqrestore(&h->lock, flags);
-
-		/* deregister_disk sets h->drv[drv_index]->queue = NULL
-		 * which keeps the interrupt handler from starting
-		 * the queue.
-		 */
-		ret = deregister_disk(h, drv_index, 0, via_ioctl);
-	}
-
-	/* If the disk is in use return */
-	if (ret)
-		goto freeret;
-
-	/* Save the new information from cciss_geometry_inquiry
-	 * and serial number inquiry.  If the disk was deregistered
-	 * above, then h->drv[drv_index] will be NULL.
-	 */
-	if (h->drv[drv_index] == NULL) {
-		drvinfo->device_initialized = 0;
-		h->drv[drv_index] = drvinfo;
-		drvinfo = NULL; /* so it won't be freed below. */
-	} else {
-		/* special case for cxd0 */
-		h->drv[drv_index]->block_size = drvinfo->block_size;
-		h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
-		h->drv[drv_index]->heads = drvinfo->heads;
-		h->drv[drv_index]->sectors = drvinfo->sectors;
-		h->drv[drv_index]->cylinders = drvinfo->cylinders;
-		h->drv[drv_index]->raid_level = drvinfo->raid_level;
-		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
-		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
-			VENDOR_LEN + 1);
-		memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
-		memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
-	}
-
-	++h->num_luns;
-	disk = h->gendisk[drv_index];
-	set_capacity(disk, h->drv[drv_index]->nr_blocks);
-
-	/* If it's not disk 0 (drv_index != 0)
-	 * or if it was disk 0, but there was previously
-	 * no actual corresponding configured logical drive
-	 * (raid_level == -1) then we want to update the
-	 * logical drive's information.
-	 */
-	if (drv_index || first_time) {
-		if (cciss_add_disk(h, disk, drv_index) != 0) {
-			cciss_free_gendisk(h, drv_index);
-			cciss_free_drive_info(h, drv_index);
-			dev_warn(&h->pdev->dev, "could not update disk %d\n",
-				drv_index);
-			--h->num_luns;
-		}
-	}
-
-freeret:
-	kfree(inq_buff);
-	kfree(drvinfo);
-	return;
-mem_msg:
-	dev_err(&h->pdev->dev, "out of memory\n");
-	goto freeret;
-}
-
-/* This function will find the first index of the controller's drive array
- * that has a null drv pointer, allocate the drive info struct and
- * return that index.  This is where new drives will be added.
- * If the index to be returned is greater than the highest_lun index for
- * the controller then highest_lun is set to this new index.
- * If there are no available indexes or if the allocation fails, then -1
- * is returned.  "controller_node" is used to know if this is a real
- * logical drive, or just the controller node, which determines if this
- * counts towards highest_lun.
- */
-static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
-{
-	int i;
-	drive_info_struct *drv;
-
-	/* Search for an empty slot for our drive info */
-	for (i = 0; i < CISS_MAX_LUN; i++) {
-
-		/* if not cxd0 case, and it's occupied, skip it. */
-		if (h->drv[i] && i != 0)
-			continue;
-		/*
-		 * If it's cxd0 case, and drv is alloc'ed already, and a
-		 * disk is configured there, skip it.
-		 */
-		if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
-			continue;
-
-		/*
-		 * We've found an empty slot.  Update highest_lun
-		 * provided this isn't just the fake cxd0 controller node.
-		 */
-		if (i > h->highest_lun && !controller_node)
-			h->highest_lun = i;
-
-		/* If adding a real disk at cxd0, and it's already alloc'ed */
-		if (i == 0 && h->drv[i] != NULL)
-			return i;
-
-		/*
-		 * Found an empty slot, not already alloc'ed.  Allocate it.
-		 * Mark it with raid_level == -1, so we know it's new later on.
-		 */
-		drv = kzalloc(sizeof(*drv), GFP_KERNEL);
-		if (!drv)
-			return -1;
-		drv->raid_level = -1; /* so we know it's new */
-		h->drv[i] = drv;
-		return i;
-	}
-	return -1;
-}
-
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
-{
-	kfree(h->drv[drv_index]);
-	h->drv[drv_index] = NULL;
-}
-
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
-{
-	put_disk(h->gendisk[drv_index]);
-	h->gendisk[drv_index] = NULL;
-}
-
-/* cciss_add_gendisk finds a free hba[]->drv structure
- * and allocates a gendisk if needed, and sets the lunid
- * in the drvinfo structure.   It returns the index into
- * the ->drv[] array, or -1 if none are free.
- * is_controller_node indicates whether highest_lun should
- * count this disk, or if it's only being added to provide
- * a means to talk to the controller in case no logical
- * drives have yet been configured.
- */
-static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
-	int controller_node)
-{
-	int drv_index;
-
-	drv_index = cciss_alloc_drive_info(h, controller_node);
-	if (drv_index == -1)
-		return -1;
-
-	/* Check if the gendisk needs to be allocated */
-	if (!h->gendisk[drv_index]) {
-		h->gendisk[drv_index] =
-			alloc_disk(1 << NWD_SHIFT);
-		if (!h->gendisk[drv_index]) {
-			dev_err(&h->pdev->dev,
-				"could not allocate a new disk %d\n",
-				drv_index);
-			goto err_free_drive_info;
-		}
-	}
-	memcpy(h->drv[drv_index]->LunID, lunid,
-		sizeof(h->drv[drv_index]->LunID));
-	if (cciss_create_ld_sysfs_entry(h, drv_index))
-		goto err_free_disk;
-	/* Don't need to mark this busy because nobody */
-	/* else knows about this disk yet to contend */
-	/* for access to it. */
-	h->drv[drv_index]->busy_configuring = 0;
-	wmb();
-	return drv_index;
-
-err_free_disk:
-	cciss_free_gendisk(h, drv_index);
-err_free_drive_info:
-	cciss_free_drive_info(h, drv_index);
-	return -1;
-}
-
-/* This is for the special case of a controller which
- * has no logical drives.  In this case, we still need
- * to register a disk so the controller can be accessed
- * by the Array Config Utility.
- */
-static void cciss_add_controller_node(ctlr_info_t *h)
-{
-	struct gendisk *disk;
-	int drv_index;
-
-	if (h->gendisk[0] != NULL) /* already did this? Then bail. */
-		return;
-
-	drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
-	if (drv_index == -1)
-		goto error;
-	h->drv[drv_index]->block_size = 512;
-	h->drv[drv_index]->nr_blocks = 0;
-	h->drv[drv_index]->heads = 0;
-	h->drv[drv_index]->sectors = 0;
-	h->drv[drv_index]->cylinders = 0;
-	h->drv[drv_index]->raid_level = -1;
-	memset(h->drv[drv_index]->serial_no, 0, 16);
-	disk = h->gendisk[drv_index];
-	if (cciss_add_disk(h, disk, drv_index) == 0)
-		return;
-	cciss_free_gendisk(h, drv_index);
-	cciss_free_drive_info(h, drv_index);
-error:
-	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
-	return;
-}
-
-/* This function will add and remove logical drives from the Logical
- * drive array of the controller and maintain persistency of ordering
- * so that mount points are preserved until the next reboot.  This allows
- * for the removal of logical drives in the middle of the drive array
- * without a re-ordering of those drives.
- * INPUT
- * h		= The controller to perform the operations on
- */
-static int rebuild_lun_table(ctlr_info_t *h, int first_time,
-	int via_ioctl)
-{
-	int num_luns;
-	ReportLunData_struct *ld_buff = NULL;
-	int return_code;
-	int listlength = 0;
-	int i;
-	int drv_found;
-	int drv_index = 0;
-	unsigned char lunid[8] = CTLR_LUNID;
-	unsigned long flags;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
-	/* Set busy_configuring flag for this operation */
-	spin_lock_irqsave(&h->lock, flags);
-	if (h->busy_configuring) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -EBUSY;
-	}
-	h->busy_configuring = 1;
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
-	if (ld_buff == NULL)
-		goto mem_msg;
-
-	return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
-				      sizeof(ReportLunData_struct),
-				      0, CTLR_LUNID, TYPE_CMD);
-
-	if (return_code == IO_OK)
-		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
-	else {	/* reading number of logical volumes failed */
-		dev_warn(&h->pdev->dev,
-			"report logical volume command failed\n");
-		listlength = 0;
-		goto freeret;
-	}
-
-	num_luns = listlength / 8;	/* 8 bytes per entry */
-	if (num_luns > CISS_MAX_LUN) {
-		num_luns = CISS_MAX_LUN;
-		dev_warn(&h->pdev->dev, "more luns configured"
-		       " on controller than can be handled by"
-		       " this driver.\n");
-	}
-
-	if (num_luns == 0)
-		cciss_add_controller_node(h);
-
-	/* Compare controller drive array to driver's drive array
-	 * to see if any drives are missing on the controller due
-	 * to action of Array Config Utility (user deletes drive)
-	 * and deregister logical drives which have disappeared.
-	 */
-	for (i = 0; i <= h->highest_lun; i++) {
-		int j;
-		drv_found = 0;
-
-		/* skip holes in the array from already deleted drives */
-		if (h->drv[i] == NULL)
-			continue;
-
-		for (j = 0; j < num_luns; j++) {
-			memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
-			if (memcmp(h->drv[i]->LunID, lunid,
-				sizeof(lunid)) == 0) {
-				drv_found = 1;
-				break;
-			}
-		}
-		if (!drv_found) {
-			/* Deregister it from the OS, it's gone. */
-			spin_lock_irqsave(&h->lock, flags);
-			h->drv[i]->busy_configuring = 1;
-			spin_unlock_irqrestore(&h->lock, flags);
-			return_code = deregister_disk(h, i, 1, via_ioctl);
-			if (h->drv[i] != NULL)
-				h->drv[i]->busy_configuring = 0;
-		}
-	}
-
-	/* Compare controller drive array to driver's drive array.
-	 * Check for updates in the drive information and any new drives
-	 * on the controller due to ACU adding logical drives, or changing
-	 * a logical drive's size, etc.  Reregister any new/changed drives
-	 */
-	for (i = 0; i < num_luns; i++) {
-		int j;
-
-		drv_found = 0;
-
-		memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
-		/* Find if the LUN is already in the drive array
-		 * of the driver.  If so then update its info
-		 * if not in use.  If it does not exist then find
-		 * the first free index and add it.
-		 */
-		for (j = 0; j <= h->highest_lun; j++) {
-			if (h->drv[j] != NULL &&
-				memcmp(h->drv[j]->LunID, lunid,
-					sizeof(h->drv[j]->LunID)) == 0) {
-				drv_index = j;
-				drv_found = 1;
-				break;
-			}
-		}
-
-		/* check if the drive was found already in the array */
-		if (!drv_found) {
-			drv_index = cciss_add_gendisk(h, lunid, 0);
-			if (drv_index == -1)
-				goto freeret;
-		}
-		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
-	}		/* end for */
-
-freeret:
-	kfree(ld_buff);
-	h->busy_configuring = 0;
-	/* We return -1 here to tell the ACU that we have registered/updated
-	 * all of the drives that we can and to keep it from calling us
-	 * additional times.
-	 */
-	return -1;
-mem_msg:
-	dev_err(&h->pdev->dev, "out of memory\n");
-	h->busy_configuring = 0;
-	goto freeret;
-}
-
-static void cciss_clear_drive_info(drive_info_struct *drive_info)
-{
-	/* zero out the disk size info */
-	drive_info->nr_blocks = 0;
-	drive_info->block_size = 0;
-	drive_info->heads = 0;
-	drive_info->sectors = 0;
-	drive_info->cylinders = 0;
-	drive_info->raid_level = -1;
-	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
-	memset(drive_info->model, 0, sizeof(drive_info->model));
-	memset(drive_info->rev, 0, sizeof(drive_info->rev));
-	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
-	/*
-	 * don't clear the LUNID though, we need to remember which
-	 * one this one is.
-	 */
-}
-
-/* This function will deregister the disk and its queue from the
- * kernel.  It must be called with the controller lock held and the
- * drv structure's busy_configuring flag set.  Its parameters are:
- *
- * disk = This is the disk to be deregistered
- * drv  = This is the drive_info_struct associated with the disk to be
- *        deregistered.  It contains information about the disk used
- *        by the driver.
- * clear_all = This flag determines whether or not the disk information
- *             is going to be completely cleared out and the highest_lun
- *             reset.  Sometimes we want to clear out information about
- *             the disk in preparation for re-adding it.  In this case
- *             the highest_lun should be left unchanged and the LunID
- *             should not be cleared.
- * via_ioctl
- *    This indicates whether we've reached this path via ioctl.
- *    This affects the maximum usage count allowed for c0d0 to be messed with.
- *    If this path is reached via ioctl(), then the max_usage_count will
- *    be 1, as the process calling ioctl() has got to have the device open.
- *    If we get here via sysfs, then the max usage count will be zero.
-*/
-static int deregister_disk(ctlr_info_t *h, int drv_index,
-			   int clear_all, int via_ioctl)
-{
-	int i;
-	struct gendisk *disk;
-	drive_info_struct *drv;
-	int recalculate_highest_lun;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
-	drv = h->drv[drv_index];
-	disk = h->gendisk[drv_index];
-
-	/* make sure logical volume is NOT in use */
-	if (clear_all || (h->gendisk[0] == disk)) {
-		if (drv->usage_count > via_ioctl)
-			return -EBUSY;
-	} else if (drv->usage_count > 0)
-		return -EBUSY;
-
-	recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
-
-	/* invalidate the devices and deregister the disk.  If it is disk
-	 * zero do not deregister it but just zero out its values.  This
-	 * allows us to delete disk zero but keep the controller registered.
-	 */
-	if (h->gendisk[0] != disk) {
-		struct request_queue *q = disk->queue;
-		if (disk->flags & GENHD_FL_UP) {
-			cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
-			del_gendisk(disk);
-		}
-		if (q)
-			blk_cleanup_queue(q);
-		/* If clear_all is set then we are deleting the logical
-		 * drive, not just refreshing its info.  For drives
-		 * other than disk 0 we will call put_disk.  We do not
-		 * do this for disk 0 as we need it to be able to
-		 * configure the controller.
-		 */
-		if (clear_all) {
-			/* This isn't pretty, but we need to find the
-			 * disk in our array and NULL out the pointer.
-			 * This is so that we will call alloc_disk if
-			 * this index is used again later.
-			 */
-			for (i = 0; i < CISS_MAX_LUN; i++) {
-				if (h->gendisk[i] == disk) {
-					h->gendisk[i] = NULL;
-					break;
-				}
-			}
-			put_disk(disk);
-		}
-	} else {
-		set_capacity(disk, 0);
-		cciss_clear_drive_info(drv);
-	}
-
-	--h->num_luns;
-
-	/* if it was the last disk, find the new highest lun */
-	if (clear_all && recalculate_highest_lun) {
-		int newhighest = -1;
-		for (i = 0; i <= h->highest_lun; i++) {
-			/* if the disk has size > 0, it is available */
-			if (h->drv[i] && h->drv[i]->heads)
-				newhighest = i;
-		}
-		h->highest_lun = newhighest;
-	}
-	return 0;
-}
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
-		size_t size, __u8 page_code, unsigned char *scsi3addr,
-		int cmd_type)
-{
-	u64bit buff_dma_handle;
-	int status = IO_OK;
-
-	c->cmd_type = CMD_IOCTL_PEND;
-	c->Header.ReplyQueue = 0;
-	if (buff != NULL) {
-		c->Header.SGList = 1;
-		c->Header.SGTotal = 1;
-	} else {
-		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
-	}
-	c->Header.Tag.lower = c->busaddr;
-	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
-
-	c->Request.Type.Type = cmd_type;
-	if (cmd_type == TYPE_CMD) {
-		switch (cmd) {
-		case CISS_INQUIRY:
-			/* are we trying to read a vital product page */
-			if (page_code != 0) {
-				c->Request.CDB[1] = 0x01;
-				c->Request.CDB[2] = page_code;
-			}
-			c->Request.CDBLen = 6;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = CISS_INQUIRY;
-			c->Request.CDB[4] = size & 0xFF;
-			break;
-		case CISS_REPORT_LOG:
-		case CISS_REPORT_PHYS:
-			/* Talking to controller so it's a physical command
-			   mode = 00 target = 0.  Nothing to write.
-			 */
-			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = cmd;
-			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
-			c->Request.CDB[7] = (size >> 16) & 0xFF;
-			c->Request.CDB[8] = (size >> 8) & 0xFF;
-			c->Request.CDB[9] = size & 0xFF;
-			break;
-
-		case CCISS_READ_CAPACITY:
-			c->Request.CDBLen = 10;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = cmd;
-			break;
-		case CCISS_READ_CAPACITY_16:
-			c->Request.CDBLen = 16;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_READ;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = cmd;
-			c->Request.CDB[1] = 0x10;
-			c->Request.CDB[10] = (size >> 24) & 0xFF;
-			c->Request.CDB[11] = (size >> 16) & 0xFF;
-			c->Request.CDB[12] = (size >> 8) & 0xFF;
-			c->Request.CDB[13] = size & 0xFF;
-			break;
-		case CCISS_CACHE_FLUSH:
-			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_WRITE;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = BMIC_WRITE;
-			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
-			c->Request.CDB[7] = (size >> 8) & 0xFF;
-			c->Request.CDB[8] = size & 0xFF;
-			break;
-		case TEST_UNIT_READY:
-			c->Request.CDBLen = 6;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_NONE;
-			c->Request.Timeout = 0;
-			break;
-		default:
-			dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd);
-			return IO_ERROR;
-		}
-	} else if (cmd_type == TYPE_MSG) {
-		switch (cmd) {
-		case CCISS_ABORT_MSG:
-			c->Request.CDBLen = 12;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_WRITE;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = cmd;	/* abort */
-			c->Request.CDB[1] = 0;	/* abort a command */
-			/* buff contains the tag of the command to abort */
-			memcpy(&c->Request.CDB[4], buff, 8);
-			break;
-		case CCISS_RESET_MSG:
-			c->Request.CDBLen = 16;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_NONE;
-			c->Request.Timeout = 0;
-			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
-			c->Request.CDB[0] = cmd;	/* reset */
-			c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
-			break;
-		case CCISS_NOOP_MSG:
-			c->Request.CDBLen = 1;
-			c->Request.Type.Attribute = ATTR_SIMPLE;
-			c->Request.Type.Direction = XFER_WRITE;
-			c->Request.Timeout = 0;
-			c->Request.CDB[0] = cmd;
-			break;
-		default:
-			dev_warn(&h->pdev->dev,
-				"unknown message type %d\n", cmd);
-			return IO_ERROR;
-		}
-	} else {
-		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
-		return IO_ERROR;
-	}
-	/* Fill in the scatter gather information */
-	if (size > 0) {
-		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
-							     buff, size,
-							     PCI_DMA_BIDIRECTIONAL);
-		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
-		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
-		c->SG[0].Len = size;
-		c->SG[0].Ext = 0;	/* we are not chaining */
-	}
-	return status;
-}
-
-static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
-			    u8 reset_type)
-{
-	CommandList_struct *c;
-	int return_status;
-
-	c = cmd_alloc(h);
-	if (!c)
-		return -ENOMEM;
-	return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
-		CTLR_LUNID, TYPE_MSG);
-	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
-	if (return_status != IO_OK) {
-		cmd_free(h, c);
-		return return_status;
-	}
-	c->waiting = NULL;
-	enqueue_cmd_and_start_io(h, c);
-	/* Don't wait for completion, the reset won't complete.  Don't free
-	 * the command either.  This is the last command we will send before
-	 * re-initializing everything, so it doesn't matter and won't leak.
-	 */
-	return 0;
-}
-
-static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
-{
-	switch (c->err_info->ScsiStatus) {
-	case SAM_STAT_GOOD:
-		return IO_OK;
-	case SAM_STAT_CHECK_CONDITION:
-		switch (0xf & c->err_info->SenseInfo[2]) {
-		case 0: return IO_OK; /* no sense */
-		case 1: return IO_OK; /* recovered error */
-		default:
-			if (check_for_unit_attention(h, c))
-				return IO_NEEDS_RETRY;
-			dev_warn(&h->pdev->dev, "cmd 0x%02x "
-				"check condition, sense key = 0x%02x\n",
-				c->Request.CDB[0], c->err_info->SenseInfo[2]);
-		}
-		break;
-	default:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
-			"scsi status = 0x%02x\n",
-			c->Request.CDB[0], c->err_info->ScsiStatus);
-		break;
-	}
-	return IO_ERROR;
-}
-
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
-{
-	int return_status = IO_OK;
-
-	if (c->err_info->CommandStatus == CMD_SUCCESS)
-		return IO_OK;
-
-	switch (c->err_info->CommandStatus) {
-	case CMD_TARGET_STATUS:
-		return_status = check_target_status(h, c);
-		break;
-	case CMD_DATA_UNDERRUN:
-	case CMD_DATA_OVERRUN:
-		/* expected for inquiry and report lun commands */
-		break;
-	case CMD_INVALID:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x is "
-		       "reported invalid\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_PROTOCOL_ERR:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
-		       "protocol error\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_HARDWARE_ERR:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
-		       " hardware error\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_CONNECTION_LOST:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
-		       "connection lost\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_ABORTED:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x was "
-		       "aborted\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_ABORT_FAILED:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
-		       "abort failed\n", c->Request.CDB[0]);
-		return_status = IO_ERROR;
-		break;
-	case CMD_UNSOLICITED_ABORT:
-		dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
-			c->Request.CDB[0]);
-		return_status = IO_NEEDS_RETRY;
-		break;
-	case CMD_UNABORTABLE:
-		dev_warn(&h->pdev->dev, "cmd unabortable\n");
-		return_status = IO_ERROR;
-		break;
-	default:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
-		       "unknown status %x\n", c->Request.CDB[0],
-		       c->err_info->CommandStatus);
-		return_status = IO_ERROR;
-	}
-	return return_status;
-}
-
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
-	int attempt_retry)
-{
-	DECLARE_COMPLETION_ONSTACK(wait);
-	u64bit buff_dma_handle;
-	int return_status = IO_OK;
-
-resend_cmd2:
-	c->waiting = &wait;
-	enqueue_cmd_and_start_io(h, c);
-
-	wait_for_completion(&wait);
-
-	if (c->err_info->CommandStatus == 0 || !attempt_retry)
-		goto command_done;
-
-	return_status = process_sendcmd_error(h, c);
-
-	if (return_status == IO_NEEDS_RETRY &&
-		c->retry_count < MAX_CMD_RETRIES) {
-		dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
-			c->Request.CDB[0]);
-		c->retry_count++;
-		/* erase the old error information */
-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-		return_status = IO_OK;
-		reinit_completion(&wait);
-		goto resend_cmd2;
-	}
-
-command_done:
-	/* unlock the buffers from DMA */
-	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
-	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
-	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
-			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
-	return return_status;
-}
-
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
-			   __u8 page_code, unsigned char scsi3addr[],
-			int cmd_type)
-{
-	CommandList_struct *c;
-	int return_status;
-
-	c = cmd_special_alloc(h);
-	if (!c)
-		return -ENOMEM;
-	return_status = fill_cmd(h, c, cmd, buff, size, page_code,
-		scsi3addr, cmd_type);
-	if (return_status == IO_OK)
-		return_status = sendcmd_withirq_core(h, c, 1);
-
-	cmd_special_free(h, c);
-	return return_status;
-}
-
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
-				   sector_t total_size,
-				   unsigned int block_size,
-				   InquiryData_struct *inq_buff,
-				   drive_info_struct *drv)
-{
-	int return_code;
-	unsigned long t;
-	unsigned char scsi3addr[8];
-
-	memset(inq_buff, 0, sizeof(InquiryData_struct));
-	log_unit_to_scsi3addr(h, scsi3addr, logvol);
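-	/* Inquiry page 0xC1 reports the logical drive's geometry (heads,
-	 * sectors, cylinders) and its RAID level, which are parsed below. */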
-	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
-			sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
-	if (return_code == IO_OK) {
-		if (inq_buff->data_byte[8] == 0xFF) {
-			dev_warn(&h->pdev->dev,
-			       "reading geometry failed, volume "
-			       "does not support reading geometry\n");
-			drv->heads = 255;
-			drv->sectors = 32;	/* Sectors per track */
-			drv->cylinders = total_size + 1;
-			drv->raid_level = RAID_UNKNOWN;
-		} else {
-			drv->heads = inq_buff->data_byte[6];
-			drv->sectors = inq_buff->data_byte[7];
-			drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
-			drv->cylinders += inq_buff->data_byte[5];
-			drv->raid_level = inq_buff->data_byte[8];
-		}
-		drv->block_size = block_size;
-		drv->nr_blocks = total_size + 1;
-		t = drv->heads * drv->sectors;
-		if (t > 1) {
-			sector_t real_size = total_size + 1;
-			unsigned long rem = sector_div(real_size, t);
-			if (rem)
-				real_size++;
-			drv->cylinders = real_size;
-		}
-	} else {		/* Get geometry failed */
-		dev_warn(&h->pdev->dev, "reading geometry failed\n");
-	}
-}
-
-static void
-cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
-		    unsigned int *block_size)
-{
-	ReadCapdata_struct *buf;
-	int return_code;
-	unsigned char scsi3addr[8];
-
-	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
-	if (!buf) {
-		dev_warn(&h->pdev->dev, "out of memory\n");
-		return;
-	}
-
-	log_unit_to_scsi3addr(h, scsi3addr, logvol);
-	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
-		sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
-	if (return_code == IO_OK) {
-		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
-		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
-	} else {		/* read capacity command failed */
-		dev_warn(&h->pdev->dev, "read capacity failed\n");
-		*total_size = 0;
-		*block_size = BLOCK_SIZE;
-	}
-	kfree(buf);
-}
-
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
-	sector_t *total_size, unsigned int *block_size)
-{
-	ReadCapdata_struct_16 *buf;
-	int return_code;
-	unsigned char scsi3addr[8];
-
-	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
-	if (!buf) {
-		dev_warn(&h->pdev->dev, "out of memory\n");
-		return;
-	}
-
-	log_unit_to_scsi3addr(h, scsi3addr, logvol);
-	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
-		buf, sizeof(ReadCapdata_struct_16),
-			0, scsi3addr, TYPE_CMD);
-	if (return_code == IO_OK) {
-		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
-		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
-	} else {		/* read capacity command failed */
-		dev_warn(&h->pdev->dev, "read capacity failed\n");
-		*total_size = 0;
-		*block_size = BLOCK_SIZE;
-	}
-	dev_info(&h->pdev->dev, "      blocks= %llu block_size= %d\n",
-	       (unsigned long long)*total_size+1, *block_size);
-	kfree(buf);
-}
-
-static int cciss_revalidate(struct gendisk *disk)
-{
-	ctlr_info_t *h = get_host(disk);
-	drive_info_struct *drv = get_drv(disk);
-	int logvol;
-	int FOUND = 0;
-	unsigned int block_size;
-	sector_t total_size;
-	InquiryData_struct *inq_buff = NULL;
-
-	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
-		if (!h->drv[logvol])
-			continue;
-		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
-			sizeof(drv->LunID)) == 0) {
-			FOUND = 1;
-			break;
-		}
-	}
-
-	if (!FOUND)
-		return 1;
-
-	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-	if (inq_buff == NULL) {
-		dev_warn(&h->pdev->dev, "out of memory\n");
-		return 1;
-	}
-	if (h->cciss_read == CCISS_READ_10) {
-		cciss_read_capacity(h, logvol,
-					&total_size, &block_size);
-	} else {
-		cciss_read_capacity_16(h, logvol,
-					&total_size, &block_size);
-	}
-	cciss_geometry_inquiry(h, logvol, total_size, block_size,
-			       inq_buff, drv);
-
-	blk_queue_logical_block_size(drv->queue, drv->block_size);
-	set_capacity(disk, drv->nr_blocks);
-
-	kfree(inq_buff);
-	return 0;
-}
-
-/*
- * Map (physical) PCI mem into (virtual) kernel space
- */
-static void __iomem *remap_pci_mem(ulong base, ulong size)
-{
-	ulong page_base = ((ulong) base) & PAGE_MASK;
-	ulong page_offs = ((ulong) base) - page_base;
-	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
-
-	return page_remapped ? (page_remapped + page_offs) : NULL;
-}
-
-/*
- * Takes jobs off the request queue and sends them to the hardware, then
- * puts them on the completion queue to wait for completion.
- */
-static void start_io(ctlr_info_t *h)
-{
-	CommandList_struct *c;
-
-	while (!list_empty(&h->reqQ)) {
-		c = list_entry(h->reqQ.next, CommandList_struct, list);
-		/* can't do anything if fifo is full */
-		if ((h->access.fifo_full(h))) {
-			dev_warn(&h->pdev->dev, "fifo full\n");
-			break;
-		}
-
-		/* Get the first entry from the Request Q */
-		removeQ(c);
-		h->Qdepth--;
-
-		/* Tell the controller to execute the command */
-		h->access.submit_command(h, c);
-
-		/* Put job onto the completed Q */
-		addQ(&h->cmpQ, c);
-	}
-}
-
-/* Assumes that h->lock is held. */
-/* Zeros out the error record and then resends the command back */
-/* to the controller */
-static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
-{
-	/* erase the old error information */
-	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-
-	/* add it to software queue and then send it to the controller */
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	if (h->Qdepth > h->maxQsinceinit)
-		h->maxQsinceinit = h->Qdepth;
-
-	start_io(h);
-}
-
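-/* Pack the four SCSI result bytes (status, message, host, driver) into
- * the single 32-bit value stored in scsi_req(rq)->result.
- */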
-static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
-	unsigned int msg_byte, unsigned int host_byte,
-	unsigned int driver_byte)
-{
-	/* inverse of macros in scsi.h */
-	return (scsi_status_byte & 0xff) |
-		((msg_byte & 0xff) << 8) |
-		((host_byte & 0xff) << 16) |
-		((driver_byte & 0xff) << 24);
-}
-
-static inline int evaluate_target_status(ctlr_info_t *h,
-			CommandList_struct *cmd, int *retry_cmd)
-{
-	unsigned char sense_key;
-	unsigned char status_byte, msg_byte, host_byte, driver_byte;
-	int error_value;
-
-	*retry_cmd = 0;
-	/* If we get in here, it means we got "target status", that is, scsi status */
-	status_byte = cmd->err_info->ScsiStatus;
-	driver_byte = DRIVER_OK;
-	msg_byte = cmd->err_info->CommandStatus; /* correct?  seems too device specific */
-
-	if (blk_rq_is_passthrough(cmd->rq))
-		host_byte = DID_PASSTHROUGH;
-	else
-		host_byte = DID_OK;
-
-	error_value = make_status_bytes(status_byte, msg_byte,
-		host_byte, driver_byte);
-
-	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
-		if (!blk_rq_is_passthrough(cmd->rq))
-			dev_warn(&h->pdev->dev, "cmd %p "
-			       "has SCSI Status 0x%x\n",
-			       cmd, cmd->err_info->ScsiStatus);
-		return error_value;
-	}
-
-	/* check the sense key */
-	sense_key = 0xf & cmd->err_info->SenseInfo[2];
-	/* no status or recovered error */
-	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
-	    !blk_rq_is_passthrough(cmd->rq))
-		error_value = 0;
-
-	if (check_for_unit_attention(h, cmd)) {
-		*retry_cmd = !blk_rq_is_passthrough(cmd->rq);
-		return 0;
-	}
-
-	/* Not SG_IO or similar? */
-	if (!blk_rq_is_passthrough(cmd->rq)) {
-		if (error_value != 0)
-			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
-			       " sense key = 0x%x\n", cmd, sense_key);
-		return error_value;
-	}
-
-	scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
-	return error_value;
-}
-
-/* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job. Note that this function does not need
- * to hold the hba/queue lock.
- */
-static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
-				    int timeout)
-{
-	int retry_cmd = 0;
-	struct request *rq = cmd->rq;
-	struct scsi_request *sreq = scsi_req(rq);
-
-	sreq->result = 0;
-
-	if (timeout)
-		sreq->result = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
-
-	if (cmd->err_info->CommandStatus == 0)	/* no error has occurred */
-		goto after_error_processing;
-
-	switch (cmd->err_info->CommandStatus) {
-	case CMD_TARGET_STATUS:
-		sreq->result = evaluate_target_status(h, cmd, &retry_cmd);
-		break;
-	case CMD_DATA_UNDERRUN:
-		if (!blk_rq_is_passthrough(cmd->rq)) {
-			dev_warn(&h->pdev->dev, "cmd %p has"
-			       " completed with data underrun "
-			       "reported\n", cmd);
-		}
-		break;
-	case CMD_DATA_OVERRUN:
-		if (!blk_rq_is_passthrough(cmd->rq))
-			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
-			       " completed with data overrun "
-			       "reported\n", cmd);
-		break;
-	case CMD_INVALID:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p is "
-		       "reported invalid\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_PROTOCOL_ERR:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p has "
-		       "protocol error\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_HARDWARE_ERR:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
-		       " hardware error\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_CONNECTION_LOST:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
-		       "connection lost\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_ABORTED:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p was "
-		       "aborted\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ABORT);
-		break;
-	case CMD_ABORT_FAILED:
-		dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
-		       "abort failed\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_UNSOLICITED_ABORT:
-		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
-		       "abort %p\n", h->ctlr, cmd);
-		if (cmd->retry_count < MAX_CMD_RETRIES) {
-			retry_cmd = 1;
-			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
-			cmd->retry_count++;
-		} else
-			dev_warn(&h->pdev->dev,
-				"%p retried too many times\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ABORT);
-		break;
-	case CMD_TIMEOUT:
-		dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	case CMD_UNABORTABLE:
-		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-		break;
-	default:
-		dev_warn(&h->pdev->dev, "cmd %p returned "
-		       "unknown status %x\n", cmd,
-		       cmd->err_info->CommandStatus);
-		sreq->result = make_status_bytes(SAM_STAT_GOOD,
-			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_rq_is_passthrough(cmd->rq) ?
-				DID_PASSTHROUGH : DID_ERROR);
-	}
-
-after_error_processing:
-
-	/* We need to return this command */
-	if (retry_cmd) {
-		resend_cciss_cmd(h, cmd);
-		return;
-	}
-	cmd->rq->completion_data = cmd;
-	blk_complete_request(cmd->rq);
-}
-
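-/* Command tag layout: the command-pool index is stored above
- * DIRECT_LOOKUP_SHIFT, bit 4 marks a tag that encodes such an index, and
- * the low-order bits (5 in performant mode, 2 in simple mode) may carry
- * controller error/status information, so they are masked off before
- * tags are compared.
- */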
-static inline u32 cciss_tag_contains_index(u32 tag)
-{
-#define DIRECT_LOOKUP_BIT 0x10
-	return tag & DIRECT_LOOKUP_BIT;
-}
-
-static inline u32 cciss_tag_to_index(u32 tag)
-{
-#define DIRECT_LOOKUP_SHIFT 5
-	return tag >> DIRECT_LOOKUP_SHIFT;
-}
-
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
-{
-#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
-#define CCISS_SIMPLE_ERROR_BITS 0x03
-	if (likely(h->transMethod & CFGTBL_Trans_Performant))
-		return tag & ~CCISS_PERF_ERROR_BITS;
-	return tag & ~CCISS_SIMPLE_ERROR_BITS;
-}
-
-static inline void cciss_mark_tag_indexed(u32 *tag)
-{
-	*tag |= DIRECT_LOOKUP_BIT;
-}
-
-static inline void cciss_set_tag_index(u32 *tag, u32 index)
-{
-	*tag |= (index << DIRECT_LOOKUP_SHIFT);
-}
-
-/*
- * Get a request and submit it to the controller.
- */
-static void do_cciss_request(struct request_queue *q)
-{
-	ctlr_info_t *h = q->queuedata;
-	CommandList_struct *c;
-	sector_t start_blk;
-	int seg;
-	struct request *creq;
-	u64bit temp64;
-	struct scatterlist *tmp_sg;
-	SGDescriptor_struct *curr_sg;
-	drive_info_struct *drv;
-	int i, dir;
-	int sg_index = 0;
-	int chained = 0;
-
-queue:
-	creq = blk_peek_request(q);
-	if (!creq)
-		goto startio;
-
-	BUG_ON(creq->nr_phys_segments > h->maxsgentries);
-
-	c = cmd_alloc(h);
-	if (!c)
-		goto full;
-
-	blk_start_request(creq);
-
-	tmp_sg = h->scatter_list[c->cmdindex];
-	spin_unlock_irq(q->queue_lock);
-
-	c->cmd_type = CMD_RWREQ;
-	c->rq = creq;
-
-	/* fill in the request */
-	drv = creq->rq_disk->private_data;
-	c->Header.ReplyQueue = 0;	/* unused in simple mode */
-	/* got command from pool, so use the command block index instead */
-	/* for direct lookups. */
-	/* The first 2 bits are reserved for controller error reporting. */
-	cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
-	cciss_mark_tag_indexed(&c->Header.Tag.lower);
-	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
-	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
-	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
-	c->Request.Type.Attribute = ATTR_SIMPLE;
-	c->Request.Type.Direction =
-	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
-	c->Request.Timeout = 0;	/* Don't time out */
-	c->Request.CDB[0] =
-	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-	start_blk = blk_rq_pos(creq);
-	dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
-	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
-	sg_init_table(tmp_sg, h->maxsgentries);
-	seg = blk_rq_map_sg(q, creq, tmp_sg);
-
-	/* get the DMA records for the setup */
-	if (c->Request.Type.Direction == XFER_READ)
-		dir = PCI_DMA_FROMDEVICE;
-	else
-		dir = PCI_DMA_TODEVICE;
-
-	curr_sg = c->SG;
-	sg_index = 0;
-	chained = 0;
-
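-	/* Build the SG list.  If more entries are needed than fit in the
-	 * command block (h->max_cmd_sgentries), continue in the per-command
-	 * chain block; cciss_map_sg_chain_block() maps and links it in below.
-	 */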
-	for (i = 0; i < seg; i++) {
-		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
-			!chained && ((seg - i) > 1)) {
-			/* Point to next chain block. */
-			curr_sg = h->cmd_sg_list[c->cmdindex];
-			sg_index = 0;
-			chained = 1;
-		}
-		curr_sg[sg_index].Len = tmp_sg[i].length;
-		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
-						tmp_sg[i].offset,
-						tmp_sg[i].length, dir);
-		if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
-			dev_warn(&h->pdev->dev,
-				"%s: error mapping page for DMA\n", __func__);
-			scsi_req(creq)->result =
-				make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
-						  DID_SOFT_ERROR);
-			cmd_free(h, c);
-			return;
-		}
-		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
-		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
-		curr_sg[sg_index].Ext = 0;  /* we are not chaining */
-		++sg_index;
-	}
-	if (chained) {
-		if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
-			(seg - (h->max_cmd_sgentries - 1)) *
-				sizeof(SGDescriptor_struct))) {
-			scsi_req(creq)->result =
-				make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
-						  DID_SOFT_ERROR);
-			cmd_free(h, c);
-			return;
-		}
-	}
-
-	/* track how many SG entries we are using */
-	if (seg > h->maxSG)
-		h->maxSG = seg;
-
-	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
-			"chained[%d]\n",
-			blk_rq_sectors(creq), seg, chained);
-
-	c->Header.SGTotal = seg + chained;
-	if (seg <= h->max_cmd_sgentries)
-		c->Header.SGList = c->Header.SGTotal;
-	else
-		c->Header.SGList = h->max_cmd_sgentries;
-	set_performant_mode(h, c);
-
-	switch (req_op(creq)) {
-	case REQ_OP_READ:
-	case REQ_OP_WRITE:
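-		/* Build a 10-byte or 16-byte READ/WRITE CDB depending on
-		 * whether the controller was switched to 16-byte CDBs for
-		 * volumes larger than 2TB.
-		 */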
-		if (h->cciss_read == CCISS_READ_10) {
-			c->Request.CDB[1] = 0;
-			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
-			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
-			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
-			c->Request.CDB[5] = start_blk & 0xff;
-			c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
-			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
-			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
-			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
-		} else {
-			u32 upper32 = upper_32_bits(start_blk);
-
-			c->Request.CDBLen = 16;
-			c->Request.CDB[1] = 0;
-			c->Request.CDB[2] = (upper32 >> 24) & 0xff; /* MSB */
-			c->Request.CDB[3] = (upper32 >> 16) & 0xff;
-			c->Request.CDB[4] = (upper32 >>  8) & 0xff;
-			c->Request.CDB[5] = upper32 & 0xff;
-			c->Request.CDB[6] = (start_blk >> 24) & 0xff;
-			c->Request.CDB[7] = (start_blk >> 16) & 0xff;
-			c->Request.CDB[8] = (start_blk >>  8) & 0xff;
-			c->Request.CDB[9] = start_blk & 0xff;
-			c->Request.CDB[10] = (blk_rq_sectors(creq) >> 24) & 0xff;
-			c->Request.CDB[11] = (blk_rq_sectors(creq) >> 16) & 0xff;
-			c->Request.CDB[12] = (blk_rq_sectors(creq) >>  8) & 0xff;
-			c->Request.CDB[13] = blk_rq_sectors(creq) & 0xff;
-			c->Request.CDB[14] = c->Request.CDB[15] = 0;
-		}
-		break;
-	case REQ_OP_SCSI_IN:
-	case REQ_OP_SCSI_OUT:
-		c->Request.CDBLen = scsi_req(creq)->cmd_len;
-		memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
-		scsi_req(creq)->sense = c->err_info->SenseInfo;
-		break;
-	default:
-		dev_warn(&h->pdev->dev, "bad request type %d\n",
-			creq->cmd_flags);
-		BUG();
-	}
-
-	spin_lock_irq(q->queue_lock);
-
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	if (h->Qdepth > h->maxQsinceinit)
-		h->maxQsinceinit = h->Qdepth;
-
-	goto queue;
-full:
-	blk_stop_queue(q);
-startio:
-	/* We already hold the driver lock here, so there is no need
-	 * to take it again.
-	 */
-	start_io(h);
-}
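For reference, the READ(10)/WRITE(10) CDB assembled above follows the standard SCSI layout: opcode in byte 0, a big-endian 32-bit LBA in bytes 2-5, and a big-endian 16-bit transfer length in bytes 7-8 (the 16-byte variant in the else branch simply widens both fields). Below is a minimal stand-alone sketch of that encoding; 0x28 is the standard READ(10) opcode, whereas the driver uses whatever opcode is stored in h->cciss_read.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the READ(10) CDB layout: byte 0 opcode, bytes 2-5 big-endian
 * starting LBA, bytes 7-8 big-endian transfer length (in blocks). */
static void build_read10(uint8_t cdb[10], uint32_t lba, uint16_t nblocks)
{
	cdb[0] = 0x28;			/* READ(10) */
	cdb[1] = 0;
	cdb[2] = (lba >> 24) & 0xff;	/* MSB of the starting LBA */
	cdb[3] = (lba >> 16) & 0xff;
	cdb[4] = (lba >> 8) & 0xff;
	cdb[5] = lba & 0xff;
	cdb[6] = 0;
	cdb[7] = (nblocks >> 8) & 0xff;
	cdb[8] = nblocks & 0xff;
	cdb[9] = 0;
}

int main(void)
{
	uint8_t cdb[10];
	int i;

	build_read10(cdb, 2048, 8);	/* read 8 blocks starting at LBA 2048 */
	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}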
-
-static inline unsigned long get_next_completion(ctlr_info_t *h)
-{
-	return h->access.command_completed(h);
-}
-
-static inline int interrupt_pending(ctlr_info_t *h)
-{
-	return h->access.intr_pending(h);
-}
-
-static inline long interrupt_not_for_us(ctlr_info_t *h)
-{
-	return ((h->access.intr_pending(h) == 0) ||
-		(h->interrupts_enabled == 0));
-}
-
-static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
-			u32 raw_tag)
-{
-	if (unlikely(tag_index >= h->nr_cmds)) {
-		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
-		return 1;
-	}
-	return 0;
-}
-
-static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
-				u32 raw_tag)
-{
-	removeQ(c);
-	if (likely(c->cmd_type == CMD_RWREQ))
-		complete_command(h, c, 0);
-	else if (c->cmd_type == CMD_IOCTL_PEND)
-		complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
-	else if (c->cmd_type == CMD_SCSI)
-		complete_scsi_command(c, 0, raw_tag);
-#endif
-}
-
-static inline u32 next_command(ctlr_info_t *h)
-{
-	u32 a;
-
-	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-		return h->access.command_completed(h);
-
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-		(h->reply_pool_head)++;
-		h->commands_outstanding--;
-	} else {
-		a = FIFO_EMPTY;
-	}
-	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
-	}
-	return a;
-}
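The reply-ring walk in next_command() is easier to see in isolation. The sketch below is a user-space re-statement of the consumer side under simplified assumptions: each slot's low bit acts as a cycle flag, and the consumer flips the value it expects every time it wraps, so fresh completions can be told apart from stale ones without a producer index. The names (reply_ring, ring_pop, RING_EMPTY) and the tag layout are illustrative, not driver symbols.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8
#define RING_EMPTY	UINT64_MAX	/* stands in for FIFO_EMPTY */

struct reply_ring {
	uint64_t slot[RING_SIZE];	/* written by the "controller" */
	unsigned int head;		/* next slot to examine */
	unsigned int cycle;		/* expected value of bit 0, flips on wrap */
};

/* Pop one completion tag, or RING_EMPTY if nothing new has been posted. */
static uint64_t ring_pop(struct reply_ring *r)
{
	uint64_t tag = RING_EMPTY;

	if ((r->slot[r->head] & 1) == r->cycle) {
		tag = r->slot[r->head];
		r->head++;
	}
	if (r->head == RING_SIZE) {	/* wrap and flip, as next_command() does */
		r->head = 0;
		r->cycle ^= 1;
	}
	return tag;
}

int main(void)
{
	/* Slots start zeroed and the expected cycle bit starts at 1,
	 * mirroring the initialization done in cciss_enter_performant_mode(). */
	struct reply_ring r = { .head = 0, .cycle = 1 };
	uint64_t t;

	r.slot[0] = (0x10 << 1) | 1;	/* two posted completions, cycle bit set */
	r.slot[1] = (0x11 << 1) | 1;

	while ((t = ring_pop(&r)) != RING_EMPTY)
		printf("completed tag 0x%llx\n", (unsigned long long)(t >> 1));
	return 0;
}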
-
-/* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
-	u32 tag_index;
-	CommandList_struct *c;
-
-	tag_index = cciss_tag_to_index(raw_tag);
-	if (bad_tag(h, tag_index, raw_tag))
-		return next_command(h);
-	c = h->cmd_pool + tag_index;
-	finish_cmd(h, c, raw_tag);
-	return next_command(h);
-}
-
-/* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
-	CommandList_struct *c = NULL;
-	__u32 busaddr_masked, tag_masked;
-
-	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
-	list_for_each_entry(c, &h->cmpQ, list) {
-		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
-		if (busaddr_masked == tag_masked) {
-			finish_cmd(h, c, raw_tag);
-			return next_command(h);
-		}
-	}
-	bad_tag(h, h->nr_cmds + 1, raw_tag);
-	return next_command(h);
-}
-
-/* Some controllers, like p400, will give us one interrupt
- * after a soft reset, even if we turned interrupts off.
- * Only need to check for this in the cciss_xxx_discard_completions
- * functions.
- */
-static int ignore_bogus_interrupt(ctlr_info_t *h)
-{
-	if (likely(!reset_devices))
-		return 0;
-
-	if (likely(h->interrupts_enabled))
-		return 0;
-
-	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
-		"(known firmware bug.)  Ignoring.\n");
-
-	return 1;
-}
-
-static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
-{
-	ctlr_info_t *h = dev_id;
-	unsigned long flags;
-	u32 raw_tag;
-
-	if (ignore_bogus_interrupt(h))
-		return IRQ_NONE;
-
-	if (interrupt_not_for_us(h))
-		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
-	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
-		while (raw_tag != FIFO_EMPTY)
-			raw_tag = next_command(h);
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
-{
-	ctlr_info_t *h = dev_id;
-	unsigned long flags;
-	u32 raw_tag;
-
-	if (ignore_bogus_interrupt(h))
-		return IRQ_NONE;
-
-	spin_lock_irqsave(&h->lock, flags);
-	raw_tag = get_next_completion(h);
-	while (raw_tag != FIFO_EMPTY)
-		raw_tag = next_command(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t do_cciss_intx(int irq, void *dev_id)
-{
-	ctlr_info_t *h = dev_id;
-	unsigned long flags;
-	u32 raw_tag;
-
-	if (interrupt_not_for_us(h))
-		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
-	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
-		while (raw_tag != FIFO_EMPTY) {
-			if (cciss_tag_contains_index(raw_tag))
-				raw_tag = process_indexed_cmd(h, raw_tag);
-			else
-				raw_tag = process_nonindexed_cmd(h, raw_tag);
-		}
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-	return IRQ_HANDLED;
-}
-
-/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
- * check the interrupt pending register because it is not set.
- */
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
-{
-	ctlr_info_t *h = dev_id;
-	unsigned long flags;
-	u32 raw_tag;
-
-	spin_lock_irqsave(&h->lock, flags);
-	raw_tag = get_next_completion(h);
-	while (raw_tag != FIFO_EMPTY) {
-		if (cciss_tag_contains_index(raw_tag))
-			raw_tag = process_indexed_cmd(h, raw_tag);
-		else
-			raw_tag = process_nonindexed_cmd(h, raw_tag);
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-	return IRQ_HANDLED;
-}
-
-/**
- * add_to_scan_list() - add controller to rescan queue
- * @h:		      Pointer to the controller.
- *
- * Adds the controller to the rescan queue if not already on the queue.
- *
- * returns 1 if added to the queue, 0 if skipped (could be on the
- * queue already, or the controller could be initializing or shutting
- * down).
- **/
-static int add_to_scan_list(struct ctlr_info *h)
-{
-	struct ctlr_info *test_h;
-	int found = 0;
-	int ret = 0;
-
-	if (h->busy_initializing)
-		return 0;
-
-	if (!mutex_trylock(&h->busy_shutting_down))
-		return 0;
-
-	mutex_lock(&scan_mutex);
-	list_for_each_entry(test_h, &scan_q, scan_list) {
-		if (test_h == h) {
-			found = 1;
-			break;
-		}
-	}
-	if (!found && !h->busy_scanning) {
-		reinit_completion(&h->scan_wait);
-		list_add_tail(&h->scan_list, &scan_q);
-		ret = 1;
-	}
-	mutex_unlock(&scan_mutex);
-	mutex_unlock(&h->busy_shutting_down);
-
-	return ret;
-}
-
-/**
- * remove_from_scan_list() - remove controller from rescan queue
- * @h:			   Pointer to the controller.
- *
- * Removes the controller from the rescan queue if present. Blocks if
- * the controller is currently conducting a rescan.  The controller
- * can be in one of three states:
- * 1. Doesn't need a scan
- * 2. On the scan list, but not scanning yet (we remove it)
- * 3. Busy scanning (and not on the list). In this case we want to wait for
- *    the scan to complete to make sure the scanning thread for this
- *    controller is completely idle.
- **/
-static void remove_from_scan_list(struct ctlr_info *h)
-{
-	struct ctlr_info *test_h, *tmp_h;
-
-	mutex_lock(&scan_mutex);
-	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
-		if (test_h == h) { /* state 2. */
-			list_del(&h->scan_list);
-			complete_all(&h->scan_wait);
-			mutex_unlock(&scan_mutex);
-			return;
-		}
-	}
-	if (h->busy_scanning) { /* state 3. */
-		mutex_unlock(&scan_mutex);
-		wait_for_completion(&h->scan_wait);
-	} else { /* state 1, nothing to do. */
-		mutex_unlock(&scan_mutex);
-	}
-}
-
-/**
- * scan_thread() - kernel thread used to rescan controllers
- * @data:	 Ignored.
- *
- * A kernel thread used to scan for drive topology changes on
- * controllers. The thread processes only one controller at a time
- * using a queue.  Controllers are added to the queue using
- * add_to_scan_list() and removed from the queue either after they have
- * been processed or via remove_from_scan_list().
- *
- * returns 0.
- **/
-static int scan_thread(void *data)
-{
-	struct ctlr_info *h;
-
-	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
-		if (kthread_should_stop())
-			break;
-
-		while (1) {
-			mutex_lock(&scan_mutex);
-			if (list_empty(&scan_q)) {
-				mutex_unlock(&scan_mutex);
-				break;
-			}
-
-			h = list_entry(scan_q.next,
-				       struct ctlr_info,
-				       scan_list);
-			list_del(&h->scan_list);
-			h->busy_scanning = 1;
-			mutex_unlock(&scan_mutex);
-
-			rebuild_lun_table(h, 0, 0);
-			complete_all(&h->scan_wait);
-			mutex_lock(&scan_mutex);
-			h->busy_scanning = 0;
-			mutex_unlock(&scan_mutex);
-		}
-	}
-
-	return 0;
-}
-
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
-	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
-		return 0;
-
-	switch (c->err_info->SenseInfo[12]) {
-	case STATE_CHANGED:
-		dev_warn(&h->pdev->dev, "a state change "
-			"detected, command retried\n");
-		return 1;
-	break;
-	case LUN_FAILED:
-		dev_warn(&h->pdev->dev, "LUN failure "
-			"detected, action required\n");
-		return 1;
-	break;
-	case REPORT_LUNS_CHANGED:
-		dev_warn(&h->pdev->dev, "report LUN data changed\n");
-	/*
-	 * Here, we could call add_to_scan_list and wake up the scan thread,
-	 * except that it's quite likely that we will get more than one
-	 * REPORT_LUNS_CHANGED condition in quick succession, which means
-	 * that those which occur after the first one will likely happen
-	 * *during* the scan_thread's rescan.  And the rescan code is not
-	 * robust enough to restart in the middle, undoing what it has already
-	 * done, and it's not clear that it's even possible to do this, since
-	 * part of what it does is notify the block layer, which starts
-	 * doing its own I/O to read partition tables and so on, and the
-	 * driver doesn't have visibility into what might need undoing.
-	 * In any event, even if it is possible, it is horribly complicated
-	 * to get right, so we just don't do it for now.
-	 *
-	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
-	 */
-		return 1;
-	break;
-	case POWER_OR_RESET:
-		dev_warn(&h->pdev->dev,
-			"a power on or device reset detected\n");
-		return 1;
-	break;
-	case UNIT_ATTENTION_CLEARED:
-		dev_warn(&h->pdev->dev,
-			"unit attention cleared by another initiator\n");
-		return 1;
-	break;
-	default:
-		dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
-		return 1;
-	}
-}
-
-/*
- *  We cannot read the structure directly; for portability we must use
- *  the I/O functions.
- *  This is for debug only.
- */
-static void print_cfg_table(ctlr_info_t *h)
-{
-	int i;
-	char temp_name[17];
-	CfgTable_struct *tb = h->cfgtable;
-
-	dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
-	dev_dbg(&h->pdev->dev, "------------------------------------\n");
-	for (i = 0; i < 4; i++)
-		temp_name[i] = readb(&(tb->Signature[i]));
-	temp_name[4] = '\0';
-	dev_dbg(&h->pdev->dev, "   Signature = %s\n", temp_name);
-	dev_dbg(&h->pdev->dev, "   Spec Number = %d\n",
-		readl(&(tb->SpecValence)));
-	dev_dbg(&h->pdev->dev, "   Transport methods supported = 0x%x\n",
-	       readl(&(tb->TransportSupport)));
-	dev_dbg(&h->pdev->dev, "   Transport methods active = 0x%x\n",
-	       readl(&(tb->TransportActive)));
-	dev_dbg(&h->pdev->dev, "   Requested transport Method = 0x%x\n",
-	       readl(&(tb->HostWrite.TransportRequest)));
-	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Delay = 0x%x\n",
-	       readl(&(tb->HostWrite.CoalIntDelay)));
-	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Count = 0x%x\n",
-	       readl(&(tb->HostWrite.CoalIntCount)));
-	dev_dbg(&h->pdev->dev, "   Max outstanding commands = 0x%x\n",
-	       readl(&(tb->CmdsOutMax)));
-	dev_dbg(&h->pdev->dev, "   Bus Types = 0x%x\n",
-		readl(&(tb->BusTypes)));
-	for (i = 0; i < 16; i++)
-		temp_name[i] = readb(&(tb->ServerName[i]));
-	temp_name[16] = '\0';
-	dev_dbg(&h->pdev->dev, "   Server Name = %s\n", temp_name);
-	dev_dbg(&h->pdev->dev, "   Heartbeat Counter = 0x%x\n\n\n",
-		readl(&(tb->HeartBeat)));
-}
-
-static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
-{
-	int i, offset, mem_type, bar_type;
-	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
-		return 0;
-	offset = 0;
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
-		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
-			offset += 4;
-		else {
-			mem_type = pci_resource_flags(pdev, i) &
-			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
-			switch (mem_type) {
-			case PCI_BASE_ADDRESS_MEM_TYPE_32:
-			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
-				offset += 4;	/* 32 bit */
-				break;
-			case PCI_BASE_ADDRESS_MEM_TYPE_64:
-				offset += 8;
-				break;
-			default:	/* reserved in PCI 2.2 */
-				dev_warn(&pdev->dev,
-				       "Base address is invalid\n");
-				return -1;
-				break;
-			}
-		}
-		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
-			return i + 1;
-	}
-	return -1;
-}
-
-/* Fill in bucket_map[], given nsgs (the max number of
- * scatter gather elements supported) and bucket[],
- * which is an array of 8 integers.  The bucket[] array
- * contains 8 different DMA transfer sizes (in 16
- * byte increments) which the controller uses to fetch
- * commands.  This function fills in bucket_map[], which
- * maps a given number of scatter gather elements to one of
- * the 8 DMA transfer sizes.  The point of it is to allow the
- * controller to only do as much DMA as needed to fetch the
- * command, with the DMA transfer size encoded in the lower
- * bits of the command address.
- */
-static void  calc_bucket_map(int bucket[], int num_buckets,
-	int nsgs, int *bucket_map)
-{
-	int i, j, b, size;
-
-	/* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
-	/* Note, bucket_map must have nsgs+1 entries. */
-	for (i = 0; i <= nsgs; i++) {
-		/* Compute size of a command with i SG entries */
-		size = i + MINIMUM_TRANSFER_BLOCKS;
-		b = num_buckets; /* Assume the biggest bucket */
-		/* Find the bucket that is just big enough */
-		for (j = 0; j < 8; j++) {
-			if (bucket[j] >= size) {
-				b = j;
-				break;
-			}
-		}
-		/* for a command with i SG entries, use bucket b. */
-		bucket_map[i] = b;
-	}
-}
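To make the mapping concrete, here is a stand-alone re-statement of calc_bucket_map() run against the same bft[] table that cciss_enter_performant_mode() programs below. MAXSGENTRIES is assumed to be 32 here, matching the traditional limit mentioned in cciss_find_board_params(); the output shows which of the eight 16-byte-block sizes the controller would fetch for a given SG count.

#include <stdio.h>

#define MINIMUM_TRANSFER_BLOCKS	4
#define MAXSGENTRIES		32	/* assumed, per the driver's default limit */

/* Same rule as calc_bucket_map(): pick the smallest bucket that fits. */
static void bucket_map(const int *bucket, int num_buckets, int nsgs, int *map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets;
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		map[i] = b;
	}
}

int main(void)
{
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
	int map[MAXSGENTRIES + 1];
	int i;

	bucket_map(bft, 8, MAXSGENTRIES, map);
	/* e.g. 0 SG entries -> bucket 0 (5 blocks = 80 bytes of DMA),
	 * 8 SG entries -> bucket 4 (12 blocks), 32 -> bucket 7 (36 blocks). */
	for (i = 0; i <= MAXSGENTRIES; i += 8)
		printf("%2d SG entries -> bucket %d (%d x 16 bytes)\n",
		       i, map[i], bft[map[i]]);
	return 0;
}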
-
-static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
-{
-	int i;
-
-	/* Under certain very rare conditions, this can take a while
-	 * (e.g. hot-replacing a failed 144GB drive in a RAID 5 set right
-	 * as we enter this code). */
-	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
-			break;
-		usleep_range(10000, 20000);
-	}
-}
-
-static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
-{
-	/* This is a bit complicated.  There are 8 registers on
-	 * the controller which we write to tell it the 8 different
-	 * command sizes there may be.  It's a way of
-	 * reducing the DMA done to fetch each command.  Encoded into
-	 * each command's tag are 3 bits which communicate to the controller
-	 * which of the eight sizes that command fits within.  The size of
-	 * each command depends on how many scatter gather entries there are.
-	 * Each SG entry requires 16 bytes.  The eight registers are programmed
-	 * with the number of 16-byte blocks a command of that size requires.
-	 * The smallest command possible requires 5 such 16-byte blocks;
-	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
-	 * blocks.  Note, this only extends to the SG entries contained
-	 * within the command block, and does not extend to chained blocks
-	 * of SG elements.   bft[] contains the eight values we write to
-	 * the registers.  They are not evenly distributed, but have more
-	 * sizes for small commands, and fewer sizes for larger commands.
-	 */
-	__u32 trans_offset;
-	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
-			/*
-			 *  5 = 1 s/g entry or 4k
-			 *  6 = 2 s/g entry or 8k
-			 *  8 = 4 s/g entry or 16k
-			 * 10 = 6 s/g entry or 24k
-			 */
-	unsigned long register_value;
-	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
-
-	h->reply_pool_wraparound = 1; /* spec: init to 1 */
-
-	/* Controller spec: zero out this buffer. */
-	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
-	h->reply_pool_head = h->reply_pool;
-
-	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
-	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
-				h->blockFetchTable);
-	writel(bft[0], &h->transtable->BlockFetch0);
-	writel(bft[1], &h->transtable->BlockFetch1);
-	writel(bft[2], &h->transtable->BlockFetch2);
-	writel(bft[3], &h->transtable->BlockFetch3);
-	writel(bft[4], &h->transtable->BlockFetch4);
-	writel(bft[5], &h->transtable->BlockFetch5);
-	writel(bft[6], &h->transtable->BlockFetch6);
-	writel(bft[7], &h->transtable->BlockFetch7);
-
-	/* size of controller ring buffer */
-	writel(h->max_commands, &h->transtable->RepQSize);
-	writel(1, &h->transtable->RepQCount);
-	writel(0, &h->transtable->RepQCtrAddrLow32);
-	writel(0, &h->transtable->RepQCtrAddrHigh32);
-	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
-	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant | use_short_tags,
-			&(h->cfgtable->HostWrite.TransportRequest));
-
-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-	cciss_wait_for_mode_change_ack(h);
-	register_value = readl(&(h->cfgtable->TransportActive));
-	if (!(register_value & CFGTBL_Trans_Performant))
-		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
-					" performant mode\n");
-}
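The routine above only programs the eight block-fetch registers; the companion step, set_performant_mode() (whose definition is not part of this hunk), tags each submitted command with its bucket index so the controller knows how many 16-byte blocks to fetch. The sketch below is illustrative only: the exact bit positions are an assumption, not taken from the driver, but the idea of folding the fetch size into the low bits of the command address is what the comment above describes.

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing: bit 0 marks a performant-mode submission and the
 * next few bits carry the bucket index from the block fetch table. */
static uint32_t tag_with_fetch_size(uint32_t busaddr,
				    const int *block_fetch_table, int sg_entries)
{
	return busaddr | 1 | ((uint32_t)block_fetch_table[sg_entries] << 1);
}

int main(void)
{
	int block_fetch_table[] = { 0, 0, 1, 1, 2 };	/* toy map: #SG -> bucket */

	printf("tag = 0x%08x\n",
	       tag_with_fetch_size(0x1000, block_fetch_table, 3));
	return 0;
}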
-
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
-{
-	__u32 trans_support;
-
-	if (cciss_simple_mode)
-		return;
-
-	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
-	/* Attempt to put controller into performant mode if supported */
-	/* Does board support performant mode? */
-	trans_support = readl(&(h->cfgtable->TransportSupport));
-	if (!(trans_support & PERFORMANT_MODE))
-		return;
-
-	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
-	/* Performant mode demands commands on a 32-byte boundary.
-	 * pci_alloc_consistent aligns on page boundaries already, so we
-	 * just need to check that the size is divisible by 32.
-	 */
-	if ((sizeof(CommandList_struct) % 32) != 0) {
-		dev_warn(&h->pdev->dev,
-			"command size %d not divisible by 32, no performant mode\n",
-			(int)sizeof(CommandList_struct));
-		return;
-	}
-
-	/* Performant mode ring buffer and supporting data structures */
-	h->reply_pool = (__u64 *)pci_alloc_consistent(
-		h->pdev, h->max_commands * sizeof(__u64),
-		&(h->reply_pool_dhandle));
-
-	/* Need a block fetch table for performant mode */
-	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
-		sizeof(__u32)), GFP_KERNEL);
-
-	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
-		goto clean_up;
-
-	cciss_enter_performant_mode(h,
-		trans_support & CFGTBL_Trans_use_short_tags);
-
-	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
-
-	return;
-clean_up:
-	kfree(h->blockFetchTable);
-	if (h->reply_pool)
-		pci_free_consistent(h->pdev,
-				h->max_commands * sizeof(__u64),
-				h->reply_pool,
-				h->reply_pool_dhandle);
-	return;
-
-} /* cciss_put_controller_into_performant_mode */
-
-/* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
- */
-
-static void cciss_interrupt_mode(ctlr_info_t *h)
-{
-	int ret;
-
-	/* Some boards advertise MSI but don't really support it */
-	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
-	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
-		goto default_int_mode;
-
-	ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
-	if (ret >= 0)   {
-		h->intr[0] = pci_irq_vector(h->pdev, 0);
-		h->intr[1] = pci_irq_vector(h->pdev, 1);
-		h->intr[2] = pci_irq_vector(h->pdev, 2);
-		h->intr[3] = pci_irq_vector(h->pdev, 3);
-		return;
-	}
-
-	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
-
-default_int_mode:
-	/* if we get here we're going to use the default interrupt mode */
-	h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
-	return;
-}
-
-static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
-{
-	int i;
-	u32 subsystem_vendor_id, subsystem_device_id;
-
-	subsystem_vendor_id = pdev->subsystem_vendor;
-	subsystem_device_id = pdev->subsystem_device;
-	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
-			subsystem_vendor_id;
-
-	for (i = 0; i < ARRAY_SIZE(products); i++) {
-		/* Stand aside for hpsa driver on request */
-		if (cciss_allow_hpsa)
-			return -ENODEV;
-		if (*board_id == products[i].board_id)
-			return i;
-	}
-	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
-		*board_id);
-	return -ENODEV;
-}
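A quick worked example of the packing above: the PCI subsystem device ID lands in the upper 16 bits of board_id and the subsystem vendor ID in the lower 16 bits, so a board with subsystem vendor 0x0E11 (Compaq) and subsystem device 0x4070 produces the 0x40700E11 value that cciss_interrupt_mode() checks against.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t subsystem_vendor_id = 0x0E11;	/* Compaq */
	uint32_t subsystem_device_id = 0x4070;
	uint32_t board_id = ((subsystem_device_id << 16) & 0xffff0000) |
				subsystem_vendor_id;

	printf("board_id = 0x%08x\n", board_id);	/* 0x40700E11 */
	return 0;
}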
-
-static inline bool cciss_board_disabled(ctlr_info_t *h)
-{
-	u16 command;
-
-	(void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
-	return ((command & PCI_COMMAND_MEMORY) == 0);
-}
-
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
-				     unsigned long *memory_bar)
-{
-	int i;
-
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
-		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
-			/* addressing mode bits already removed */
-			*memory_bar = pci_resource_start(pdev, i);
-			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
-				*memory_bar);
-			return 0;
-		}
-	dev_warn(&pdev->dev, "no memory BAR found\n");
-	return -ENODEV;
-}
-
-static int cciss_wait_for_board_state(struct pci_dev *pdev,
-				      void __iomem *vaddr, int wait_for_ready)
-#define BOARD_READY 1
-#define BOARD_NOT_READY 0
-{
-	int i, iterations;
-	u32 scratchpad;
-
-	if (wait_for_ready)
-		iterations = CCISS_BOARD_READY_ITERATIONS;
-	else
-		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
-
-	for (i = 0; i < iterations; i++) {
-		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (wait_for_ready) {
-			if (scratchpad == CCISS_FIRMWARE_READY)
-				return 0;
-		} else {
-			if (scratchpad != CCISS_FIRMWARE_READY)
-				return 0;
-		}
-		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
-	}
-	dev_warn(&pdev->dev, "board not ready, timed out.\n");
-	return -ENODEV;
-}
-
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
-				u32 *cfg_base_addr, u64 *cfg_base_addr_index,
-				u64 *cfg_offset)
-{
-	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
-	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
-	*cfg_base_addr &= (u32) 0x0000ffff;
-	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
-	if (*cfg_base_addr_index == -1) {
-		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
-			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static int cciss_find_cfgtables(ctlr_info_t *h)
-{
-	u64 cfg_offset;
-	u32 cfg_base_addr;
-	u64 cfg_base_addr_index;
-	u32 trans_offset;
-	int rc;
-
-	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
-		&cfg_base_addr_index, &cfg_offset);
-	if (rc)
-		return rc;
-	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
-		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
-	if (!h->cfgtable)
-		return -ENOMEM;
-	rc = write_driver_ver_to_cfgtable(h->cfgtable);
-	if (rc)
-		return rc;
-	/* Find performant mode table. */
-	trans_offset = readl(&h->cfgtable->TransMethodOffset);
-	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
-				cfg_base_addr_index)+cfg_offset+trans_offset,
-				sizeof(*h->transtable));
-	if (!h->transtable)
-		return -ENOMEM;
-	return 0;
-}
-
-static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
-{
-	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
-
-	/* Limit commands in memory limited kdump scenario. */
-	if (reset_devices && h->max_commands > 32)
-		h->max_commands = 32;
-
-	if (h->max_commands < 16) {
-		dev_warn(&h->pdev->dev, "Controller reports "
-			"max supported commands of %d, an obvious lie. "
-			"Using 16.  Ensure that firmware is up to date.\n",
-			h->max_commands);
-		h->max_commands = 16;
-	}
-}
-
-/* Interrogate the hardware for some limits:
- * max commands, max SG elements without chaining, and with chaining,
- * SG chain block size, etc.
- */
-static void cciss_find_board_params(ctlr_info_t *h)
-{
-	cciss_get_max_perf_mode_cmds(h);
-	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
-	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
-	/*
-	 * The P600 may exhibit poor performance under some workloads
-	 * if we use the value in the configuration table. Limit this
-	 * controller to MAXSGENTRIES (32) instead.
-	 */
-	if (h->board_id == 0x3225103C)
-		h->maxsgentries = MAXSGENTRIES;
-	/*
-	 * Limit in-command s/g elements to 32 to save DMA-able memory.
-	 * However, the spec says if 0, use 31.
-	 */
-	h->max_cmd_sgentries = 31;
-	if (h->maxsgentries > 512) {
-		h->max_cmd_sgentries = 32;
-		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
-		h->maxsgentries--; /* save one for chain pointer */
-	} else {
-		h->maxsgentries = 31; /* default to traditional values */
-		h->chainsize = 0;
-	}
-}
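A worked example of the limits computed above, using a hypothetical MaxSGElements value of 544 (any value over the 512 threshold behaves the same way): the command block keeps room for 31 data entries plus one chain pointer, and the rest of the list spills into a chained SG block. The sketch just re-runs the arithmetic and prints the results.

#include <stdio.h>

int main(void)
{
	int maxsgentries = 544;		/* hypothetical MaxSGElements value */
	int max_cmd_sgentries = 31;
	int chainsize = 0;

	if (maxsgentries > 512) {
		max_cmd_sgentries = 32;	/* 31 data entries + 1 chain pointer */
		chainsize = maxsgentries - max_cmd_sgentries + 1;
		maxsgentries--;		/* one slot reserved for the chain pointer */
	} else {
		maxsgentries = 31;	/* traditional values, no chaining */
	}
	printf("in-command SG slots: %d, chain block entries: %d, total SG limit: %d\n",
	       max_cmd_sgentries, chainsize, maxsgentries);
	return 0;
}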
-
-static inline bool CISS_signature_present(ctlr_info_t *h)
-{
-	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
-		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
-		return false;
-	}
-	return true;
-}
-
-/* Need to enable prefetch in the SCSI core for the 6400 on x86 */
-static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
-{
-#ifdef CONFIG_X86
-	u32 prefetch;
-
-	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
-	prefetch |= 0x100;
-	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
-#endif
-}
-
-/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
- * in a prefetch beyond physical memory.
- */
-static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
-{
-	u32 dma_prefetch;
-	__u32 dma_refetch;
-
-	if (h->board_id != 0x3225103C)
-		return;
-	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
-	dma_prefetch |= 0x8000;
-	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
-	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
-	dma_refetch |= 0x1;
-	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
-}
-
-static int cciss_pci_init(ctlr_info_t *h)
-{
-	int prod_index, err;
-
-	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
-	if (prod_index < 0)
-		return -ENODEV;
-	h->product_name = products[prod_index].product_name;
-	h->access = *(products[prod_index].access);
-
-	if (cciss_board_disabled(h)) {
-		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-		return -ENODEV;
-	}
-
-	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
-				PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
-
-	err = pci_enable_device(h->pdev);
-	if (err) {
-		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
-		return err;
-	}
-
-	err = pci_request_regions(h->pdev, "cciss");
-	if (err) {
-		dev_warn(&h->pdev->dev,
-			"Cannot obtain PCI resources, aborting\n");
-		return err;
-	}
-
-	dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
-	dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
-
-/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
- * else we use the IO-APIC interrupt assigned to us by system ROM.
- */
-	cciss_interrupt_mode(h);
-	err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
-	if (err)
-		goto err_out_free_res;
-	h->vaddr = remap_pci_mem(h->paddr, 0x250);
-	if (!h->vaddr) {
-		err = -ENOMEM;
-		goto err_out_free_res;
-	}
-	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
-	if (err)
-		goto err_out_free_res;
-	err = cciss_find_cfgtables(h);
-	if (err)
-		goto err_out_free_res;
-	print_cfg_table(h);
-	cciss_find_board_params(h);
-
-	if (!CISS_signature_present(h)) {
-		err = -ENODEV;
-		goto err_out_free_res;
-	}
-	cciss_enable_scsi_prefetch(h);
-	cciss_p600_dma_prefetch_quirk(h);
-	err = cciss_enter_simple_mode(h);
-	if (err)
-		goto err_out_free_res;
-	cciss_put_controller_into_performant_mode(h);
-	return 0;
-
-err_out_free_res:
-	/*
-	 * Deliberately omit pci_disable_device(): it does something nasty to
-	 * Smart Array controllers that pci_enable_device does not undo
-	 */
-	if (h->transtable)
-		iounmap(h->transtable);
-	if (h->cfgtable)
-		iounmap(h->cfgtable);
-	if (h->vaddr)
-		iounmap(h->vaddr);
-	pci_release_regions(h->pdev);
-	return err;
-}
-
-/* Function to find the first free pointer into our hba[] array
- * Returns -1 if no free entries are left.
- */
-static int alloc_cciss_hba(struct pci_dev *pdev)
-{
-	int i;
-
-	for (i = 0; i < MAX_CTLR; i++) {
-		if (!hba[i]) {
-			ctlr_info_t *h;
-
-			h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
-			if (!h)
-				goto Enomem;
-			hba[i] = h;
-			return i;
-		}
-	}
-	dev_warn(&pdev->dev, "This driver supports a maximum"
-	       " of %d controllers.\n", MAX_CTLR);
-	return -1;
-Enomem:
-	dev_warn(&pdev->dev, "out of memory.\n");
-	return -1;
-}
-
-static void free_hba(ctlr_info_t *h)
-{
-	int i;
-
-	hba[h->ctlr] = NULL;
-	for (i = 0; i < h->highest_lun + 1; i++)
-		if (h->gendisk[i] != NULL)
-			put_disk(h->gendisk[i]);
-	kfree(h);
-}
-
-/* Send a message CDB to the firmware. */
-static int cciss_message(struct pci_dev *pdev, unsigned char opcode,
-			 unsigned char type)
-{
-	typedef struct {
-		CommandListHeader_struct CommandHeader;
-		RequestBlock_struct Request;
-		ErrDescriptor_struct ErrorDescriptor;
-	} Command;
-	static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
-	Command *cmd;
-	dma_addr_t paddr64;
-	uint32_t paddr32, tag;
-	void __iomem *vaddr;
-	int i, err;
-
-	vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
-	if (vaddr == NULL)
-		return -ENOMEM;
-
-	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
-	   CCISS commands, so they must be allocated from the lower 4GiB of
-	   memory. */
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err) {
-		iounmap(vaddr);
-		return -ENOMEM;
-	}
-
-	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
-	if (cmd == NULL) {
-		iounmap(vaddr);
-		return -ENOMEM;
-	}
-
-	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
-	   although there's no guarantee, we assume that the address is at
-	   least 4-byte aligned (most likely, it's page-aligned). */
-	paddr32 = paddr64;
-
-	cmd->CommandHeader.ReplyQueue = 0;
-	cmd->CommandHeader.SGList = 0;
-	cmd->CommandHeader.SGTotal = 0;
-	cmd->CommandHeader.Tag.lower = paddr32;
-	cmd->CommandHeader.Tag.upper = 0;
-	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
-
-	cmd->Request.CDBLen = 16;
-	cmd->Request.Type.Type = TYPE_MSG;
-	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
-	cmd->Request.Type.Direction = XFER_NONE;
-	cmd->Request.Timeout = 0; /* Don't time out */
-	cmd->Request.CDB[0] = opcode;
-	cmd->Request.CDB[1] = type;
-	memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
-
-	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
-	cmd->ErrorDescriptor.Addr.upper = 0;
-	cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
-
-	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
-
-	for (i = 0; i < 10; i++) {
-		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-		if ((tag & ~3) == paddr32)
-			break;
-		msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
-	}
-
-	iounmap(vaddr);
-
-	/* we leak the DMA buffer here ... no choice since the controller could
-	   still complete the command. */
-	if (i == 10) {
-		dev_err(&pdev->dev,
-			"controller message %02x:%02x timed out\n",
-			opcode, type);
-		return -ETIMEDOUT;
-	}
-
-	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
-
-	if (tag & 2) {
-		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
-			opcode, type);
-		return -EIO;
-	}
-
-	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
-		opcode, type);
-	return 0;
-}
-
-#define cciss_noop(p) cciss_message(p, 3, 0)
-
-static int cciss_controller_hard_reset(struct pci_dev *pdev,
-	void __iomem *vaddr, u32 use_doorbell)
-{
-	u16 pmcsr;
-	int pos;
-
-	if (use_doorbell) {
-		/* For everything after the P600, the PCI power state method
-		 * of resetting the controller doesn't work, so we have this
-		 * other way using the doorbell register.
-		 */
-		dev_info(&pdev->dev, "using doorbell to reset controller\n");
-		writel(use_doorbell, vaddr + SA5_DOORBELL);
-	} else { /* Try to do it the PCI power state way */
-
-		/* Quoting from the Open CISS Specification: "The Power
-		 * Management Control/Status Register (CSR) controls the power
-		 * state of the device.  The normal operating state is D0,
-		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
-		 * the controller, place the interface device in D3 then to D0,
-		 * this causes a secondary PCI reset which will reset the
-		 * controller." */
-
-		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
-		if (pos == 0) {
-			dev_err(&pdev->dev,
-				"cciss_controller_hard_reset: "
-				"PCI PM not supported\n");
-			return -ENODEV;
-		}
-		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
-		/* enter the D3hot power management state */
-		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
-		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-		pmcsr |= PCI_D3hot;
-		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
-		msleep(500);
-
-		/* enter the D0 power management state */
-		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-		pmcsr |= PCI_D0;
-		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
-		/*
-		 * The P600 requires a small delay when changing states.
-		 * Otherwise we may think the board did not reset and we bail.
-		 * This is for kdump only and is particular to the P600.
-		 */
-		msleep(500);
-	}
-	return 0;
-}
-
-static void init_driver_version(char *driver_version, int len)
-{
-	memset(driver_version, 0, len);
-	strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
-}
-
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
-{
-	char *driver_version;
-	int i, size = sizeof(cfgtable->driver_version);
-
-	driver_version = kmalloc(size, GFP_KERNEL);
-	if (!driver_version)
-		return -ENOMEM;
-
-	init_driver_version(driver_version, size);
-	for (i = 0; i < size; i++)
-		writeb(driver_version[i], &cfgtable->driver_version[i]);
-	kfree(driver_version);
-	return 0;
-}
-
-static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
-					  unsigned char *driver_ver)
-{
-	int i;
-
-	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
-		driver_ver[i] = readb(&cfgtable->driver_version[i]);
-}
-
-static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
-{
-
-	char *driver_ver, *old_driver_ver;
-	int rc, size = sizeof(cfgtable->driver_version);
-
-	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
-	if (!old_driver_ver)
-		return -ENOMEM;
-	driver_ver = old_driver_ver + size;
-
-	/* After a reset, the 32 bytes of "driver version" in the cfgtable
-	 * should have been changed, otherwise we know the reset failed.
-	 */
-	init_driver_version(old_driver_ver, size);
-	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
-	rc = !memcmp(driver_ver, old_driver_ver, size);
-	kfree(old_driver_ver);
-	return rc;
-}
-
-/* This does a hard reset of the controller using PCI power management
- * states or using the doorbell register. */
-static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
-{
-	u64 cfg_offset;
-	u32 cfg_base_addr;
-	u64 cfg_base_addr_index;
-	void __iomem *vaddr;
-	unsigned long paddr;
-	u32 misc_fw_support;
-	int rc;
-	CfgTable_struct __iomem *cfgtable;
-	u32 use_doorbell;
-	u32 board_id;
-	u16 command_register;
-
-	/* For controllers as old as the P600, this is very nearly
-	 * the same thing as
-	 *
-	 * pci_save_state(pci_dev);
-	 * pci_set_power_state(pci_dev, PCI_D3hot);
-	 * pci_set_power_state(pci_dev, PCI_D0);
-	 * pci_restore_state(pci_dev);
-	 *
-	 * For controllers newer than the P600, the pci power state
-	 * method of resetting doesn't work so we have another way
-	 * using the doorbell register.
-	 */
-
-	/* Exclude 640x boards.  These are two pci devices in one slot
-	 * which share a battery backed cache module.  One controls the
-	 * cache, the other accesses the cache through the one that controls
-	 * it.  If we reset the one controlling the cache, the other will
-	 * likely not be happy.  Just forbid resetting this conjoined mess.
-	 */
-	cciss_lookup_board_id(pdev, &board_id);
-	if (!ctlr_is_resettable(board_id)) {
-		dev_warn(&pdev->dev, "Controller not resettable\n");
-		return -ENODEV;
-	}
-
-	/* if controller is soft- but not hard resettable... */
-	if (!ctlr_is_hard_resettable(board_id))
-		return -ENOTSUPP; /* try soft reset later. */
-
-	/* Save the PCI command register */
-	pci_read_config_word(pdev, 4, &command_register);
-	/* Turn the board off.  This is so that later pci_restore_state()
-	 * won't turn the board on before the rest of config space is ready.
-	 */
-	pci_disable_device(pdev);
-	pci_save_state(pdev);
-
-	/* find the first memory BAR, so we can find the cfg table */
-	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
-	if (rc)
-		return rc;
-	vaddr = remap_pci_mem(paddr, 0x250);
-	if (!vaddr)
-		return -ENOMEM;
-
-	/* find cfgtable in order to check if reset via doorbell is supported */
-	rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
-					&cfg_base_addr_index, &cfg_offset);
-	if (rc)
-		goto unmap_vaddr;
-	cfgtable = remap_pci_mem(pci_resource_start(pdev,
-		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
-	if (!cfgtable) {
-		rc = -ENOMEM;
-		goto unmap_vaddr;
-	}
-	rc = write_driver_ver_to_cfgtable(cfgtable);
-	if (rc)
-		goto unmap_vaddr;
-
-	/* If reset via doorbell register is supported, use that.
-	 * There are two such methods.  Favor the newest method.
-	 */
-	misc_fw_support = readl(&cfgtable->misc_fw_support);
-	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
-	if (use_doorbell) {
-		use_doorbell = DOORBELL_CTLR_RESET2;
-	} else {
-		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
-		if (use_doorbell) {
-			dev_warn(&pdev->dev, "Controller claims that "
-				"'Bit 2 doorbell reset' is "
-				"supported, but not 'bit 5 doorbell reset'.  "
-				"Firmware update is recommended.\n");
-			rc = -ENOTSUPP; /* use the soft reset */
-			goto unmap_cfgtable;
-		}
-	}
-
-	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
-	if (rc)
-		goto unmap_cfgtable;
-	pci_restore_state(pdev);
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		dev_warn(&pdev->dev, "failed to enable device.\n");
-		goto unmap_cfgtable;
-	}
-	pci_write_config_word(pdev, 4, command_register);
-
-	/* Some devices (notably the HP Smart Array 5i Controller)
-	   need a little pause here */
-	msleep(CCISS_POST_RESET_PAUSE_MSECS);
-
-	/* Wait for board to become not ready, then ready. */
-	dev_info(&pdev->dev, "Waiting for board to reset.\n");
-	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
-	if (rc) {
-		dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
-				"  Will try soft reset.\n");
-		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
-		goto unmap_cfgtable;
-	}
-	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
-	if (rc) {
-		dev_warn(&pdev->dev,
-			"failed waiting for board to become ready "
-			"after hard reset\n");
-		goto unmap_cfgtable;
-	}
-
-	rc = controller_reset_failed(cfgtable);
-	if (rc < 0)
-		goto unmap_cfgtable;
-	if (rc) {
-		dev_warn(&pdev->dev, "Unable to successfully hard reset "
-			"controller. Will try soft reset.\n");
-		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
-	} else {
-		dev_info(&pdev->dev, "Board ready after hard reset.\n");
-	}
-
-unmap_cfgtable:
-	iounmap(cfgtable);
-
-unmap_vaddr:
-	iounmap(vaddr);
-	return rc;
-}
-
-static int cciss_init_reset_devices(struct pci_dev *pdev)
-{
-	int rc, i;
-
-	if (!reset_devices)
-		return 0;
-
-	/* Reset the controller with a PCI power-cycle or via doorbell */
-	rc = cciss_kdump_hard_reset_controller(pdev);
-
-	/* -ENOTSUPP here means we cannot reset the controller
-	 * but it's already (and still) up and running in
-	 * "performant mode".  Or, it might be 640x, which can't reset
-	 * due to concerns about shared bbwc between 6402/6404 pair.
-	 */
-	if (rc == -ENOTSUPP)
-		return rc; /* just try to do the kdump anyhow. */
-	if (rc)
-		return -ENODEV;
-
-	/* Now try to get the controller to respond to a no-op */
-	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
-	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
-		if (cciss_noop(pdev) == 0)
-			break;
-		else
-			dev_warn(&pdev->dev, "no-op failed%s\n",
-				(i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
-					"; re-trying" : ""));
-		msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
-	}
-	return 0;
-}
-
-static int cciss_allocate_cmd_pool(ctlr_info_t *h)
-{
-	h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
-		sizeof(unsigned long), GFP_KERNEL);
-	h->cmd_pool = pci_alloc_consistent(h->pdev,
-		h->nr_cmds * sizeof(CommandList_struct),
-		&(h->cmd_pool_dhandle));
-	h->errinfo_pool = pci_alloc_consistent(h->pdev,
-		h->nr_cmds * sizeof(ErrorInfo_struct),
-		&(h->errinfo_pool_dhandle));
-	if ((h->cmd_pool_bits == NULL)
-		|| (h->cmd_pool == NULL)
-		|| (h->errinfo_pool == NULL)) {
-		dev_err(&h->pdev->dev, "out of memory");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static int cciss_allocate_scatterlists(ctlr_info_t *h)
-{
-	int i;
-
-	/* zero it, so that on free we need not know how many were alloc'ed */
-	h->scatter_list = kzalloc(h->max_commands *
-				sizeof(struct scatterlist *), GFP_KERNEL);
-	if (!h->scatter_list)
-		return -ENOMEM;
-
-	for (i = 0; i < h->nr_cmds; i++) {
-		h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
-						h->maxsgentries, GFP_KERNEL);
-		if (h->scatter_list[i] == NULL) {
-			dev_err(&h->pdev->dev, "could not allocate "
-				"s/g lists\n");
-			return -ENOMEM;
-		}
-	}
-	return 0;
-}
-
-static void cciss_free_scatterlists(ctlr_info_t *h)
-{
-	int i;
-
-	if (h->scatter_list) {
-		for (i = 0; i < h->nr_cmds; i++)
-			kfree(h->scatter_list[i]);
-		kfree(h->scatter_list);
-	}
-}
-
-static void cciss_free_cmd_pool(ctlr_info_t *h)
-{
-	kfree(h->cmd_pool_bits);
-	if (h->cmd_pool)
-		pci_free_consistent(h->pdev,
-			h->nr_cmds * sizeof(CommandList_struct),
-			h->cmd_pool, h->cmd_pool_dhandle);
-	if (h->errinfo_pool)
-		pci_free_consistent(h->pdev,
-			h->nr_cmds * sizeof(ErrorInfo_struct),
-			h->errinfo_pool, h->errinfo_pool_dhandle);
-}
-
-static int cciss_request_irq(ctlr_info_t *h,
-	irqreturn_t (*msixhandler)(int, void *),
-	irqreturn_t (*intxhandler)(int, void *))
-{
-	if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
-		if (!request_irq(h->intr[h->intr_mode], msixhandler,
-				0, h->devname, h))
-			return 0;
-		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
-			" for %s\n", h->intr[h->intr_mode],
-			h->devname);
-		return -1;
-	}
-
-	if (!request_irq(h->intr[h->intr_mode], intxhandler,
-			IRQF_SHARED, h->devname, h))
-		return 0;
-	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
-		h->intr[h->intr_mode], h->devname);
-	return -1;
-}
-
-static int cciss_kdump_soft_reset(ctlr_info_t *h)
-{
-	if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
-		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
-		return -EIO;
-	}
-
-	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
-	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
-		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
-		return -1;
-	}
-
-	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
-	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
-		dev_warn(&h->pdev->dev, "Board failed to become ready "
-			"after soft reset.\n");
-		return -1;
-	}
-
-	return 0;
-}
-
-static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
-{
-	int ctlr = h->ctlr;
-
-	free_irq(h->intr[h->intr_mode], h);
-	pci_free_irq_vectors(h->pdev);
-	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
-	cciss_free_scatterlists(h);
-	cciss_free_cmd_pool(h);
-	kfree(h->blockFetchTable);
-	if (h->reply_pool)
-		pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
-				h->reply_pool, h->reply_pool_dhandle);
-	if (h->transtable)
-		iounmap(h->transtable);
-	if (h->cfgtable)
-		iounmap(h->cfgtable);
-	if (h->vaddr)
-		iounmap(h->vaddr);
-	unregister_blkdev(h->major, h->devname);
-	cciss_destroy_hba_sysfs_entry(h);
-	pci_release_regions(h->pdev);
-	kfree(h);
-	hba[ctlr] = NULL;
-}
-
-/*
- *  This is it.  Find all the controllers and register them.  I really hate
- *  stealing all these major device numbers.
- *  Returns 0 on success, a negative error code on failure.
- */
-static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int i;
-	int j = 0;
-	int rc;
-	int try_soft_reset = 0;
-	int dac, return_code;
-	InquiryData_struct *inq_buff;
-	ctlr_info_t *h;
-	unsigned long flags;
-
-	/*
-	 * By default the cciss driver is used for all older HP Smart Array
-	 * controllers. There are module parameters that allow a user to
-	 * override this behavior and instead use the hpsa SCSI driver. If
-	 * that is the case, cciss may be loaded first from the kdump initrd
-	 * image and cause a kernel panic. So if reset_devices is true and
-	 * cciss_allow_hpsa is set, just bail.
-	 */
-	if ((reset_devices) && (cciss_allow_hpsa == 1))
-		return -ENODEV;
-	rc = cciss_init_reset_devices(pdev);
-	if (rc) {
-		if (rc != -ENOTSUPP)
-			return rc;
-		/* If the reset fails in a particular way (it has no way to do
-		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
-		 * a soft reset once we get the controller configured up to the
-		 * point that it can accept a command.
-		 */
-		try_soft_reset = 1;
-		rc = 0;
-	}
-
-reinit_after_soft_reset:
-
-	i = alloc_cciss_hba(pdev);
-	if (i < 0)
-		return -ENOMEM;
-
-	h = hba[i];
-	h->pdev = pdev;
-	h->busy_initializing = 1;
-	h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
-	INIT_LIST_HEAD(&h->cmpQ);
-	INIT_LIST_HEAD(&h->reqQ);
-	mutex_init(&h->busy_shutting_down);
-
-	if (cciss_pci_init(h) != 0)
-		goto clean_no_release_regions;
-
-	sprintf(h->devname, "cciss%d", i);
-	h->ctlr = i;
-
-	if (cciss_tape_cmds < 2)
-		cciss_tape_cmds = 2;
-	if (cciss_tape_cmds > 16)
-		cciss_tape_cmds = 16;
-
-	init_completion(&h->scan_wait);
-
-	if (cciss_create_hba_sysfs_entry(h))
-		goto clean0;
-
-	/* configure PCI DMA stuff */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-		dac = 1;
-	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
-		dac = 0;
-	else {
-		dev_err(&h->pdev->dev, "no suitable DMA available\n");
-		goto clean1;
-	}
-
-	/*
-	 * register with the major number, or get a dynamic major number
-	 * by passing 0 as argument.  This is done to support more
-	 * than 8 controllers.
-	 */
-	if (i < MAX_CTLR_ORIG)
-		h->major = COMPAQ_CISS_MAJOR + i;
-	rc = register_blkdev(h->major, h->devname);
-	if (rc == -EBUSY || rc == -EINVAL) {
-		dev_err(&h->pdev->dev,
-		       "Unable to get major number %d for %s "
-		       "on hba %d\n", h->major, h->devname, i);
-		goto clean1;
-	} else {
-		if (i >= MAX_CTLR_ORIG)
-			h->major = rc;
-	}
-
-	/* make sure the board interrupts are off */
-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-	rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
-	if (rc)
-		goto clean2;
-
-	dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
-	       h->devname, pdev->device, pci_name(pdev),
-	       h->intr[h->intr_mode], dac ? "" : " not");
-
-	if (cciss_allocate_cmd_pool(h))
-		goto clean4;
-
-	if (cciss_allocate_scatterlists(h))
-		goto clean4;
-
-	h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
-		h->chainsize, h->nr_cmds);
-	if (!h->cmd_sg_list && h->chainsize > 0)
-		goto clean4;
-
-	spin_lock_init(&h->lock);
-
-	/* Initialize the pdev driver private data;
-	   have it point to h. */
-	pci_set_drvdata(pdev, h);
-	/* command and error info recs zeroed out before
-	   they are used */
-	bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
-
-	h->num_luns = 0;
-	h->highest_lun = -1;
-	for (j = 0; j < CISS_MAX_LUN; j++) {
-		h->drv[j] = NULL;
-		h->gendisk[j] = NULL;
-	}
-
-	/* At this point, the controller is ready to take commands.
-	 * Now, if reset_devices and the hard reset didn't work, try
-	 * the soft reset and see if that works.
-	 */
-	if (try_soft_reset) {
-
-		/* This is kind of gross.  We may or may not get a completion
-		 * from the soft reset command, and if we do, then the value
-		 * from the fifo may or may not be valid.  So, we wait 10 secs
-		 * after the reset, throwing away any completions we get during
-		 * that time.  Unregister the interrupt handler and register
-		 * fake ones to scoop up any residual completions.
-		 */
-		spin_lock_irqsave(&h->lock, flags);
-		h->access.set_intr_mask(h, CCISS_INTR_OFF);
-		spin_unlock_irqrestore(&h->lock, flags);
-		free_irq(h->intr[h->intr_mode], h);
-		rc = cciss_request_irq(h, cciss_msix_discard_completions,
-					cciss_intx_discard_completions);
-		if (rc) {
-			dev_warn(&h->pdev->dev, "Failed to request_irq after "
-				"soft reset.\n");
-			goto clean4;
-		}
-
-		rc = cciss_kdump_soft_reset(h);
-		if (rc) {
-			dev_warn(&h->pdev->dev, "Soft reset failed.\n");
-			goto clean4;
-		}
-
-		dev_info(&h->pdev->dev, "Board READY.\n");
-		dev_info(&h->pdev->dev,
-			"Waiting for stale completions to drain.\n");
-		h->access.set_intr_mask(h, CCISS_INTR_ON);
-		msleep(10000);
-		h->access.set_intr_mask(h, CCISS_INTR_OFF);
-
-		rc = controller_reset_failed(h->cfgtable);
-		if (rc)
-			dev_info(&h->pdev->dev,
-				"Soft reset appears to have failed.\n");
-
-		/* since the controller has been reset, we have to go back and re-init
-		 * everything.  Easiest to just forget what we've done and do it
-		 * all over again.
-		 */
-		cciss_undo_allocations_after_kdump_soft_reset(h);
-		try_soft_reset = 0;
-		if (rc)
-			/* don't go to clean4, we already unallocated */
-			return -ENODEV;
-
-		goto reinit_after_soft_reset;
-	}
-
-	cciss_scsi_setup(h);
-
-	/* Turn the interrupts on so we can service requests */
-	h->access.set_intr_mask(h, CCISS_INTR_ON);
-
-	/* Get the firmware version */
-	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-	if (inq_buff == NULL) {
-		dev_err(&h->pdev->dev, "out of memory\n");
-		goto clean4;
-	}
-
-	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
-		sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
-	if (return_code == IO_OK) {
-		h->firm_ver[0] = inq_buff->data_byte[32];
-		h->firm_ver[1] = inq_buff->data_byte[33];
-		h->firm_ver[2] = inq_buff->data_byte[34];
-		h->firm_ver[3] = inq_buff->data_byte[35];
-	} else {	 /* send command failed */
-		dev_warn(&h->pdev->dev, "unable to determine firmware"
-			" version of controller\n");
-	}
-	kfree(inq_buff);
-
-	cciss_procinit(h);
-
-	h->cciss_max_sectors = 8192;
-
-	rebuild_lun_table(h, 1, 0);
-	cciss_engage_scsi(h);
-	h->busy_initializing = 0;
-	return 0;
-
-clean4:
-	cciss_free_cmd_pool(h);
-	cciss_free_scatterlists(h);
-	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
-	free_irq(h->intr[h->intr_mode], h);
-clean2:
-	unregister_blkdev(h->major, h->devname);
-clean1:
-	cciss_destroy_hba_sysfs_entry(h);
-clean0:
-	pci_release_regions(pdev);
-clean_no_release_regions:
-	h->busy_initializing = 0;
-
-	/*
-	 * Deliberately omit pci_disable_device(): it does something nasty to
-	 * Smart Array controllers that pci_enable_device does not undo
-	 */
-	pci_set_drvdata(pdev, NULL);
-	free_hba(h);
-	return -ENODEV;
-}
-
-static void cciss_shutdown(struct pci_dev *pdev)
-{
-	ctlr_info_t *h;
-	char *flush_buf;
-	int return_code;
-
-	h = pci_get_drvdata(pdev);
-	flush_buf = kzalloc(4, GFP_KERNEL);
-	if (!flush_buf) {
-		dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
-		return;
-	}
-	/* write all data in the battery backed cache to disk */
-	return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
-		4, 0, CTLR_LUNID, TYPE_CMD);
-	kfree(flush_buf);
-	if (return_code != IO_OK)
-		dev_warn(&h->pdev->dev, "Error flushing cache\n");
-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-	free_irq(h->intr[h->intr_mode], h);
-}
-
-static int cciss_enter_simple_mode(struct ctlr_info *h)
-{
-	u32 trans_support;
-
-	trans_support = readl(&(h->cfgtable->TransportSupport));
-	if (!(trans_support & SIMPLE_MODE))
-		return -ENOTSUPP;
-
-	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
-	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-	cciss_wait_for_mode_change_ack(h);
-	print_cfg_table(h);
-	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-		dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
-		return -ENODEV;
-	}
-	h->transMethod = CFGTBL_Trans_Simple;
-	return 0;
-}
-
-static void cciss_remove_one(struct pci_dev *pdev)
-{
-	ctlr_info_t *h;
-	int i, j;
-
-	if (pci_get_drvdata(pdev) == NULL) {
-		dev_err(&pdev->dev, "Unable to remove device\n");
-		return;
-	}
-
-	h = pci_get_drvdata(pdev);
-	i = h->ctlr;
-	if (hba[i] == NULL) {
-		dev_err(&pdev->dev, "device appears to already be removed\n");
-		return;
-	}
-
-	mutex_lock(&h->busy_shutting_down);
-
-	remove_from_scan_list(h);
-	remove_proc_entry(h->devname, proc_cciss);
-	unregister_blkdev(h->major, h->devname);
-
-	/* remove it from the disk list */
-	for (j = 0; j < CISS_MAX_LUN; j++) {
-		struct gendisk *disk = h->gendisk[j];
-		if (disk) {
-			struct request_queue *q = disk->queue;
-
-			if (disk->flags & GENHD_FL_UP) {
-				cciss_destroy_ld_sysfs_entry(h, j, 1);
-				del_gendisk(disk);
-			}
-			if (q)
-				blk_cleanup_queue(q);
-		}
-	}
-
-#ifdef CONFIG_CISS_SCSI_TAPE
-	cciss_unregister_scsi(h);	/* unhook from SCSI subsystem */
-#endif
-
-	cciss_shutdown(pdev);
-
-	pci_free_irq_vectors(h->pdev);
-
-	iounmap(h->transtable);
-	iounmap(h->cfgtable);
-	iounmap(h->vaddr);
-
-	cciss_free_cmd_pool(h);
-	/* Free up sg elements */
-	for (j = 0; j < h->nr_cmds; j++)
-		kfree(h->scatter_list[j]);
-	kfree(h->scatter_list);
-	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
-	kfree(h->blockFetchTable);
-	if (h->reply_pool)
-		pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
-				h->reply_pool, h->reply_pool_dhandle);
-	/*
-	 * Deliberately omit pci_disable_device(): it does something nasty to
-	 * Smart Array controllers that pci_enable_device does not undo
-	 */
-	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
-	cciss_destroy_hba_sysfs_entry(h);
-	mutex_unlock(&h->busy_shutting_down);
-	free_hba(h);
-}
-
-static struct pci_driver cciss_pci_driver = {
-	.name = "cciss",
-	.probe = cciss_init_one,
-	.remove = cciss_remove_one,
-	.id_table = cciss_pci_device_id,	/* id_table */
-	.shutdown = cciss_shutdown,
-};
-
-/*
- *  This is it.  Register the PCI driver information for the cards we control;
- *  the OS will call our registered routines when it finds one of our cards.
- */
-static int __init cciss_init(void)
-{
-	int err;
-
-	/*
-	 * The hardware requires that commands are aligned on a 64-bit
-	 * boundary. Given that we use pci_alloc_consistent() to allocate an
-	 * array of them, the size must be a multiple of 8 bytes.
-	 */
-	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
-	printk(KERN_INFO DRIVER_NAME "\n");
-
-	err = bus_register(&cciss_bus_type);
-	if (err)
-		return err;
-
-	/* Start the scan thread */
-	cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
-	if (IS_ERR(cciss_scan_thread)) {
-		err = PTR_ERR(cciss_scan_thread);
-		goto err_bus_unregister;
-	}
-
-	/* Register for our PCI devices */
-	err = pci_register_driver(&cciss_pci_driver);
-	if (err)
-		goto err_thread_stop;
-
-	return err;
-
-err_thread_stop:
-	kthread_stop(cciss_scan_thread);
-err_bus_unregister:
-	bus_unregister(&cciss_bus_type);
-
-	return err;
-}
-
-static void __exit cciss_cleanup(void)
-{
-	int i;
-
-	pci_unregister_driver(&cciss_pci_driver);
-	/* double check that all controller entries have been removed */
-	for (i = 0; i < MAX_CTLR; i++) {
-		if (hba[i] != NULL) {
-			dev_warn(&hba[i]->pdev->dev,
-				"had to remove controller\n");
-			cciss_remove_one(hba[i]->pdev);
-		}
-	}
-	kthread_stop(cciss_scan_thread);
-	if (proc_cciss)
-		remove_proc_entry("driver/cciss", NULL);
-	bus_unregister(&cciss_bus_type);
-}
-
-module_init(cciss_init);
-module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
deleted file mode 100644
index 24b5fd75501a60b450c6f349f1230711b022d725..0000000000000000000000000000000000000000
--- a/drivers/block/cciss.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#ifndef CCISS_H
-#define CCISS_H
-
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-
-#include "cciss_cmd.h"
-
-
-#define NWD_SHIFT	4
-#define MAX_PART	(1 << NWD_SHIFT)
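(NWD_SHIFT and MAX_PART split each block minor number into a logical
volume index and a partition index.  The helpers below are a small
sketch of that encoding for illustration; they are not part of the
original header and the names are made up.)

	/* Sketch only: the partition lives in the low NWD_SHIFT bits. */
	static inline unsigned int cciss_make_minor(unsigned int volume,
						    unsigned int partition)
	{
		return (volume << NWD_SHIFT) | (partition & (MAX_PART - 1));
	}

	static inline unsigned int cciss_minor_to_volume(unsigned int minor)
	{
		return minor >> NWD_SHIFT;
	}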
-
-#define IO_OK		0
-#define IO_ERROR	1
-#define IO_NEEDS_RETRY  3
-
-#define VENDOR_LEN	8
-#define MODEL_LEN	16
-#define REV_LEN		4
-
-struct ctlr_info;
-typedef struct ctlr_info ctlr_info_t;
-
-struct access_method {
-	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
-	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
-	unsigned long (*fifo_full)(ctlr_info_t *h);
-	bool (*intr_pending)(ctlr_info_t *h);
-	unsigned long (*command_completed)(ctlr_info_t *h);
-};
-typedef struct _drive_info_struct
-{
-	unsigned char LunID[8];
-	int 	usage_count;
-	struct request_queue *queue;
-	sector_t nr_blocks;
-	int	block_size;
-	int 	heads;
-	int	sectors;
-	int 	cylinders;
-	int	raid_level; /* set to -1 to indicate that
-			     * the drive is not in use/configured
-			     */
-	int	busy_configuring; /* This is set when a drive is being removed
-				   * to prevent it from being opened or its
-				   * queue from being started.
-				   */
-	struct	device dev;
-	__u8 serial_no[16]; /* from inquiry page 0x83,
-			     * not necessarily null terminated.
-			     */
-	char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
-	char model[MODEL_LEN + 1];   /* SCSI model string */
-	char rev[REV_LEN + 1];       /* SCSI revision string */
-	char device_initialized;     /* indicates whether dev is initialized */
-} drive_info_struct;
-
-struct ctlr_info
-{
-	int	ctlr;
-	char	devname[8];
-	char    *product_name;
-	char	firm_ver[4]; /* Firmware version */
-	struct pci_dev *pdev;
-	__u32	board_id;
-	void __iomem *vaddr;
-	unsigned long paddr;
-	int 	nr_cmds; /* Number of commands allowed on this controller */
-	CfgTable_struct __iomem *cfgtable;
-	int	interrupts_enabled;
-	int	major;
-	int 	max_commands;
-	int	commands_outstanding;
-	int 	max_outstanding; /* Debug */ 
-	int	num_luns;
-	int 	highest_lun;
-	int	usage_count;  /* number of opens of all minor devices */
-	/* Need space for temp sg list
-	 * number of scatter/gathers supported
-	 * number of scatter/gathers in chained block
-	 */
-	struct	scatterlist **scatter_list;
-	int	maxsgentries;
-	int	chainsize;
-	int	max_cmd_sgentries;
-	SGDescriptor_struct **cmd_sg_list;
-
-#	define PERF_MODE_INT	0
-#	define DOORBELL_INT	1
-#	define SIMPLE_MODE_INT	2
-#	define MEMQ_MODE_INT	3
-	unsigned int intr[4];
-	int	intr_mode;
-	int 	cciss_max_sectors;
-	BYTE	cciss_read;
-	BYTE	cciss_write;
-	BYTE	cciss_read_capacity;
-
-	/* information about each logical volume */
-	drive_info_struct *drv[CISS_MAX_LUN];
-
-	struct access_method access;
-
-	/* queue and queue Info */ 
-	struct list_head reqQ;
-	struct list_head cmpQ;
-	unsigned int Qdepth;
-	unsigned int maxQsinceinit;
-	unsigned int maxSG;
-	spinlock_t lock;
-
-	/* pointers to command and error info pool */
-	CommandList_struct 	*cmd_pool;
-	dma_addr_t		cmd_pool_dhandle; 
-	ErrorInfo_struct 	*errinfo_pool;
-	dma_addr_t		errinfo_pool_dhandle; 
-        unsigned long  		*cmd_pool_bits;
-	int			nr_allocs;
-	int			nr_frees; 
-	int			busy_configuring;
-	int			busy_initializing;
-	int			busy_scanning;
-	struct mutex		busy_shutting_down;
-
-	/* This element holds the zero based queue number of the last
-	 * queue to be started.  It is used for fairness.
-	*/
-	int			next_to_run;
-
-	/* Disk structures we need to pass back */
-	struct gendisk   *gendisk[CISS_MAX_LUN];
-#ifdef CONFIG_CISS_SCSI_TAPE
-	struct cciss_scsi_adapter_data_t *scsi_ctlr;
-#endif
-	unsigned char alive;
-	struct list_head scan_list;
-	struct completion scan_wait;
-	struct device dev;
-	/*
-	 * Performant mode tables.
-	 */
-	u32 trans_support;
-	u32 trans_offset;
-	struct TransTable_struct *transtable;
-	unsigned long transMethod;
-
-	/*
-	 * Performant mode completion buffer
-	 */
-	u64 *reply_pool;
-	dma_addr_t reply_pool_dhandle;
-	u64 *reply_pool_head;
-	size_t reply_pool_size;
-	unsigned char reply_pool_wraparound;
-	u32 *blockFetchTable;
-};
-
-/*  Defining the different access_methods
- *
- * Memory mapped FIFO interface (SMART 53xx cards)
- */
-#define SA5_DOORBELL	0x20
-#define SA5_REQUEST_PORT_OFFSET	0x40
-#define SA5_REPLY_INTR_MASK_OFFSET	0x34
-#define SA5_REPLY_PORT_OFFSET		0x44
-#define SA5_INTR_STATUS		0x30
-#define SA5_SCRATCHPAD_OFFSET	0xB0
-
-#define SA5_CTCFG_OFFSET	0xB4
-#define SA5_CTMEM_OFFSET	0xB8
-
-#define SA5_INTR_OFF		0x08
-#define SA5B_INTR_OFF		0x04
-#define SA5_INTR_PENDING	0x08
-#define SA5B_INTR_PENDING	0x04
-#define FIFO_EMPTY		0xffffffff	
-#define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
-/* Perf. mode flags */
-#define SA5_PERF_INTR_PENDING	0x04
-#define SA5_PERF_INTR_OFF	0x05
-#define SA5_OUTDB_STATUS_PERF_BIT	0x01
-#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
-#define SA5_OUTDB_CLEAR         0xA0
-#define SA5_OUTDB_STATUS        0x9C
-
-
-#define  CISS_ERROR_BIT		0x02
-
-#define CCISS_INTR_ON 	1 
-#define CCISS_INTR_OFF	0
-
-
-/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
- * to become ready, in seconds, before giving up on it.
- * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
- * between polling the board to see if it is ready, in
- * milliseconds.  CCISS_BOARD_READY_ITERATIONS is derived from
- * the above.
- */
-#define CCISS_BOARD_READY_WAIT_SECS (120)
-#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
-#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
-#define CCISS_BOARD_READY_ITERATIONS \
-	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
-		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_BOARD_NOT_READY_ITERATIONS \
-	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
-		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_POST_RESET_PAUSE_MSECS (3000)
-#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000)
-#define CCISS_POST_RESET_NOOP_RETRIES (12)
-#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
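(A sketch of how the "board ready" constants above are meant to be
combined: poll the scratchpad register until the firmware-ready value
appears or the iteration budget runs out.  The helper name is
hypothetical and <linux/delay.h> is assumed for msleep(); this is not
the original driver code.)

	/* Illustrative polling loop, assuming msleep() is available. */
	static int cciss_wait_for_board_ready(ctlr_info_t *h)
	{
		int i;
		u32 scratchpad;

		for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
			scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
			if (scratchpad == CCISS_FIRMWARE_READY)
				return 0;
			msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
		}
		return -ENODEV;
	}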
-
-/*
- * Send the command to the hardware.
- */
-static void SA5_submit_command(ctlr_info_t *h, CommandList_struct *c)
-{
-#ifdef CCISS_DEBUG
-	printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
-			h->ctlr, c->busaddr);
-#endif /* CCISS_DEBUG */
-	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
-	readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-	h->commands_outstanding++;
-	if (h->commands_outstanding > h->max_outstanding)
-		h->max_outstanding = h->commands_outstanding;
-}
-
-/*
- *  This card is the opposite of the other cards.
- *   0 turns interrupts on...
- *   0x08 turns them off...
- */
-static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
-{
-	if (val) { /* Turn interrupts on */
-		h->interrupts_enabled = 1;
-		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	} else { /* Turn them off */
-		h->interrupts_enabled = 0;
-		writel(SA5_INTR_OFF,
-			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	}
-}
-/*
- *  This card is the opposite of the other cards.
- *   0 turns interrupts on...
- *   0x04 turns them off...
- */
-static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
-{
-	if (val) { /* Turn interrupts on */
-		h->interrupts_enabled = 1;
-		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	} else { /* Turn them off */
-		h->interrupts_enabled = 0;
-		writel(SA5B_INTR_OFF,
-			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	}
-}
-
-/* Performant mode intr_mask */
-static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
-{
-	if (val) { /* turn on interrupts */
-		h->interrupts_enabled = 1;
-		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	} else {
-		h->interrupts_enabled = 0;
-		writel(SA5_PERF_INTR_OFF,
-				h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
-	}
-}
-
-/*
- *  Returns true if the fifo is full.
- */
-static unsigned long SA5_fifo_full(ctlr_info_t *h)
-{
-	return h->commands_outstanding >= h->max_commands;
-}
-/*
- *   Returns the value read from the hardware,
- *   or FIFO_EMPTY if there is nothing to read.
- */
-static unsigned long SA5_completed(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
-
-	if (register_value != FIFO_EMPTY) {
-		h->commands_outstanding--;
-#ifdef CCISS_DEBUG
-		printk("cciss:  Read %lx back from board\n", register_value);
-#endif /* CCISS_DEBUG */
-	}
-#ifdef CCISS_DEBUG
-	else
-		printk("cciss:  FIFO Empty read\n");
-#endif /* CCISS_DEBUG */
-	return register_value;
-}
-
-/* Performant mode command completed */
-static unsigned long SA5_performant_completed(ctlr_info_t *h)
-{
-	unsigned long register_value = FIFO_EMPTY;
-
-	/* flush the controller write of the reply queue by reading
-	 * outbound doorbell status register.
-	 */
-	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
-	/* msi auto clears the interrupt pending bit. */
-	if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
-		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
-		/* Do a read in order to flush the write to the controller
-		 * (as per spec.)
-		 */
-		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
-	}
-
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		register_value = *(h->reply_pool_head);
-		(h->reply_pool_head)++;
-		h->commands_outstanding--;
-	} else {
-		register_value = FIFO_EMPTY;
-	}
-	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
-	}
-
-	return register_value;
-}
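(The function above consumes a host-memory reply queue in which the
controller toggles bit 0 of each entry on every pass through the ring.
The small model below restates just that parity scheme in isolation,
for illustration; the struct and function names are made up and are not
part of the driver.)

	/* Host-side model of the parity-bit reply ring (sketch only). */
	struct reply_ring_model {
		u64 *pool;		/* base of the reply area */
		u64 *head;		/* next entry to examine */
		unsigned int size;	/* entries in the ring */
		unsigned char wrap;	/* expected value of bit 0 */
	};

	static u64 reply_ring_pop(struct reply_ring_model *r)
	{
		u64 tag = FIFO_EMPTY;

		/* Consume only while bit 0 matches our wraparound flag. */
		if ((*r->head & 1) == r->wrap) {
			tag = *r->head;
			r->head++;
		}
		/* On wraparound, expect the opposite parity next pass. */
		if (r->head == r->pool + r->size) {
			r->head = r->pool;
			r->wrap ^= 1;
		}
		return tag;
	}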
-/*
- *	Returns true if an interrupt is pending.
- */
-static bool SA5_intr_pending(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
-	printk("cciss: intr_pending %lx\n", register_value);
-#endif  /* CCISS_DEBUG */
-	if (register_value & SA5_INTR_PENDING)
-		return true;
-	return false;
-}
-
-/*
- *	Returns true if an interrupt is pending.
- */
-static bool SA5B_intr_pending(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
-	printk("cciss: intr_pending %lx\n", register_value);
-#endif  /* CCISS_DEBUG */
-	if (register_value & SA5B_INTR_PENDING)
-		return true;
-	return false;
-}
-
-static bool SA5_performant_intr_pending(ctlr_info_t *h)
-{
-	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
-
-	if (!register_value)
-		return false;
-
-	if (h->pdev->msi_enabled || h->pdev->msix_enabled)
-		return true;
-
-	/* Read outbound doorbell to flush */
-	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
-	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
-}
-
-static struct access_method SA5_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_intr_mask,
-	.fifo_full = SA5_fifo_full,
-	.intr_pending = SA5_intr_pending,
-	.command_completed = SA5_completed,
-};
-
-static struct access_method SA5B_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5B_intr_mask,
-	.fifo_full = SA5_fifo_full,
-	.intr_pending = SA5B_intr_pending,
-	.command_completed = SA5_completed,
-};
-
-static struct access_method SA5_performant_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.fifo_full = SA5_fifo_full,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
-};
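(The three access_method tables above are what the rest of the driver
dispatches through; the loop below is a sketch of that dispatch, shown
for illustration only, with a made-up function name.)

	/* Sketch: drain completions through the access_method hooks. */
	static void cciss_service_completions_sketch(ctlr_info_t *h)
	{
		unsigned long tag;

		while (h->access.intr_pending(h)) {
			tag = h->access.command_completed(h);
			if (tag == FIFO_EMPTY)
				break;
			/* look up and finish the command this tag names */
		}
	}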
-
-struct board_type {
-	__u32	board_id;
-	char	*product_name;
-	struct access_method *access;
-	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
-};
-
-#endif /* CCISS_H */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
deleted file mode 100644
index d9be6b4d49a6a0e3bc950d3cb395edbef5af6d3b..0000000000000000000000000000000000000000
--- a/drivers/block/cciss_cmd.h
+++ /dev/null
@@ -1,269 +0,0 @@
-#ifndef CCISS_CMD_H
-#define CCISS_CMD_H
-
-#include <linux/cciss_defs.h>
-
-/* DEFINES */
-#define CISS_VERSION "1.00"
-
-/* general boundary definitions */
-#define MAXSGENTRIES            32
-#define CCISS_SG_CHAIN          0x80000000
-#define MAXREPLYQS              256
-
-/* Unit Attentions ASC's as defined for the MSA2012sa */
-#define POWER_OR_RESET			0x29
-#define STATE_CHANGED			0x2a
-#define UNIT_ATTENTION_CLEARED		0x2f
-#define LUN_FAILED			0x3e
-#define REPORT_LUNS_CHANGED		0x3f
-
-/* Unit Attentions ASCQ's as defined for the MSA2012sa */
-
-	/* These ASCQ's defined for ASC = POWER_OR_RESET */
-#define POWER_ON_RESET			0x00
-#define POWER_ON_REBOOT			0x01
-#define SCSI_BUS_RESET			0x02
-#define MSA_TARGET_RESET		0x03
-#define CONTROLLER_FAILOVER		0x04
-#define TRANSCEIVER_SE			0x05
-#define TRANSCEIVER_LVD			0x06
-
-	/* These ASCQ's defined for ASC = STATE_CHANGED */
-#define RESERVATION_PREEMPTED		0x03
-#define ASYM_ACCESS_CHANGED		0x06
-#define LUN_CAPACITY_CHANGED		0x09
-
-/* config space register offsets */
-#define CFG_VENDORID            0x00
-#define CFG_DEVICEID            0x02
-#define CFG_I2OBAR              0x10
-#define CFG_MEM1BAR             0x14
-
-/* i2o space register offsets */
-#define I2O_IBDB_SET            0x20
-#define I2O_IBDB_CLEAR          0x70
-#define I2O_INT_STATUS          0x30
-#define I2O_INT_MASK            0x34
-#define I2O_IBPOST_Q            0x40
-#define I2O_OBPOST_Q            0x44
-#define I2O_DMA1_CFG		0x214
-
-/* Configuration Table */
-#define CFGTBL_ChangeReq        0x00000001l
-#define CFGTBL_AccCmds          0x00000001l
-#define DOORBELL_CTLR_RESET     0x00000004l
-#define DOORBELL_CTLR_RESET2    0x00000020l
-
-#define CFGTBL_Trans_Simple     0x00000002l
-#define CFGTBL_Trans_Performant 0x00000004l
-#define CFGTBL_Trans_use_short_tags 0x20000000l
-
-#define CFGTBL_BusType_Ultra2   0x00000001l
-#define CFGTBL_BusType_Ultra3   0x00000002l
-#define CFGTBL_BusType_Fibre1G  0x00000100l
-#define CFGTBL_BusType_Fibre2G  0x00000200l
-typedef struct _vals32
-{
-        __u32   lower;
-        __u32   upper;
-} vals32;
-
-typedef union _u64bit
-{
-   vals32	val32;
-   __u64	val;
-} u64bit;
-
-/* Type defs used in the following structs */
-#define QWORD vals32 
-
-/* STRUCTURES */
-#define CISS_MAX_PHYS_LUN	1024
-/* SCSI-3 Commands */
-
-#pragma pack(1)	
-
-#define CISS_INQUIRY 0x12
-/* Data returned */
-typedef struct _InquiryData_struct
-{
-  BYTE data_byte[36];
-} InquiryData_struct;
-
-#define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
-#define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
-/* Data returned */
-typedef struct _ReportLUNdata_struct
-{
-  BYTE LUNListLength[4];
-  DWORD reserved;
-  BYTE LUN[CISS_MAX_LUN][8];
-} ReportLunData_struct;
-
-#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ 
-typedef struct _ReadCapdata_struct
-{
-  BYTE total_size[4];	/* Total size in blocks */
-  BYTE block_size[4];	/* Size of blocks in bytes */
-} ReadCapdata_struct;
-
-#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
-
-/* service action to differentiate a 16 byte read capacity from
-   other commands that use the 0x9e SCSI op code */
-
-#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10
-
-typedef struct _ReadCapdata_struct_16
-{
-	BYTE total_size[8];   /* Total size in blocks */
-	BYTE block_size[4];   /* Size of blocks in bytes */
-	BYTE prot_en:1;       /* protection enable bit */
-	BYTE rto_en:1;        /* reference tag own enable bit */
-	BYTE reserved:6;      /* reserved bits */
-	BYTE reserved2[18];   /* reserved bytes per spec */
-} ReadCapdata_struct_16;
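(The capacity structures above carry their multi-byte fields as raw
byte arrays.  Assuming the usual SCSI big-endian byte order, a decode
helper could look like the sketch below; the name is made up and this
is not part of the original header.)

	/* Sketch: fold a big-endian byte array into an integer. */
	static inline __u64 cciss_be_bytes_to_u64(const BYTE *p, int n)
	{
		__u64 v = 0;
		int i;

		for (i = 0; i < n; i++)
			v = (v << 8) | p[i];
		return v;
	}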
-
-/* Define the supported read/write commands for cciss based controllers */
-
-#define CCISS_READ_10   0x28    /* Read(10)  */
-#define CCISS_WRITE_10  0x2a    /* Write(10) */
-#define CCISS_READ_16   0x88    /* Read(16)  */
-#define CCISS_WRITE_16  0x8a    /* Write(16) */
-
-/* Define the CDB lengths supported by cciss based controllers */
-
-#define CDB_LEN10	10
-#define CDB_LEN16	16
-
-/* BMIC commands */
-#define BMIC_READ 0x26
-#define BMIC_WRITE 0x27
-#define BMIC_CACHE_FLUSH 0xc2
-#define CCISS_CACHE_FLUSH 0x01	/* C2 was already being used by CCISS */
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-#define CCISS_RESET_TYPE_CONTROLLER 0x00
-#define CCISS_RESET_TYPE_BUS 0x01
-#define CCISS_RESET_TYPE_TARGET 0x03
-#define CCISS_RESET_TYPE_LUN 0x04
-#define CCISS_NOOP_MSG 0x03
-
-/* Command List Structure */
-#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
-
-typedef struct _CommandListHeader_struct {
-  BYTE              ReplyQueue;
-  BYTE              SGList;
-  HWORD             SGTotal;
-  QWORD             Tag;
-  LUNAddr_struct    LUN;
-} CommandListHeader_struct;
-typedef struct _ErrDescriptor_struct {
-  QWORD  Addr;
-  DWORD  Len;
-} ErrDescriptor_struct;
-typedef struct _SGDescriptor_struct {
-  QWORD  Addr;
-  DWORD  Len;
-  DWORD  Ext;
-} SGDescriptor_struct;
-
-/* Command types */
-#define CMD_RWREQ       0x00
-#define CMD_IOCTL_PEND  0x01
-#define CMD_SCSI	0x03
-#define CMD_MSG_DONE	0x04
-#define CMD_MSG_TIMEOUT 0x05
-#define CMD_MSG_STALE	0xff
-
-/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
- * because low bits of the address are used to indicate whether
- * the tag contains an index or an address.  PAD_32 and PAD_64
- * can be adjusted independently as needed for 32-bit and
- * 64-bit systems.
- */
-#define COMMANDLIST_ALIGNMENT (32)
-#define IS_64_BIT ((sizeof(long) - 4)/4)
-#define IS_32_BIT (!IS_64_BIT)
-#define PAD_32 (0)
-#define PAD_64 (4)
-#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
-#define DIRECT_LOOKUP_BIT 0x10
-#define DIRECT_LOOKUP_SHIFT 5
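(The comment above explains that the low bits of a tag distinguish a
command index from a bus address.  The helpers below are one reading of
how DIRECT_LOOKUP_BIT and DIRECT_LOOKUP_SHIFT fit that scheme; they are
a sketch with made-up names, not the original code.)

	/* Sketch: tags that carry a command index rather than an address. */
	static inline u32 cciss_make_index_tag(u32 cmdindex)
	{
		return (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	}

	static inline int cciss_tag_contains_index(u32 tag)
	{
		return tag & DIRECT_LOOKUP_BIT;
	}

	static inline u32 cciss_tag_to_index(u32 tag)
	{
		return tag >> DIRECT_LOOKUP_SHIFT;
	}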
-
-typedef struct _CommandList_struct {
-  CommandListHeader_struct Header;
-  RequestBlock_struct      Request;
-  ErrDescriptor_struct     ErrDesc;
-  SGDescriptor_struct      SG[MAXSGENTRIES];
-	/* information associated with the command */ 
-  __u32			   busaddr; /* physical address of this record */
-  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */ 
-  int			   ctlr;
-  int			   cmd_type; 
-  long			   cmdindex;
-  struct list_head list;
-  struct request *	   rq;
-  struct completion *waiting;
-  int	 retry_count;
-  void * scsi_cmd;
-  char pad[PADSIZE];
-} CommandList_struct;
-
-/* Configuration Table Structure */
-typedef struct _HostWrite_struct {
-  DWORD TransportRequest;
-  DWORD Reserved;
-  DWORD CoalIntDelay;
-  DWORD CoalIntCount;
-} HostWrite_struct;
-
-typedef struct _CfgTable_struct {
-  BYTE             Signature[4];
-  DWORD            SpecValence;
-#define SIMPLE_MODE	0x02
-#define PERFORMANT_MODE	0x04
-#define MEMQ_MODE	0x08
-  DWORD            TransportSupport;
-  DWORD            TransportActive;
-  HostWrite_struct HostWrite;
-  DWORD            CmdsOutMax;
-  DWORD            BusTypes;
-  DWORD            TransMethodOffset;
-  BYTE             ServerName[16];
-  DWORD            HeartBeat;
-  DWORD            SCSI_Prefetch;
-  DWORD            MaxSGElements;
-  DWORD            MaxLogicalUnits;
-  DWORD            MaxPhysicalDrives;
-  DWORD            MaxPhysicalDrivesPerLogicalUnit;
-  DWORD            MaxPerformantModeCommands;
-  u8		   reserved[0x78 - 0x58];
-  u32		   misc_fw_support; /* offset 0x78 */
-#define MISC_FW_DOORBELL_RESET (0x02)
-#define MISC_FW_DOORBELL_RESET2 (0x10)
-	u8	   driver_version[32];
-} CfgTable_struct;
-
-struct TransTable_struct {
-  u32 BlockFetch0;
-  u32 BlockFetch1;
-  u32 BlockFetch2;
-  u32 BlockFetch3;
-  u32 BlockFetch4;
-  u32 BlockFetch5;
-  u32 BlockFetch6;
-  u32 BlockFetch7;
-  u32 RepQSize;
-  u32 RepQCount;
-  u32 RepQCtrAddrLow32;
-  u32 RepQCtrAddrHigh32;
-  u32 RepQAddr0Low32;
-  u32 RepQAddr0High32;
-};
-
-#pragma pack()	 
-#endif /* CCISS_CMD_H */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
deleted file mode 100644
index 01a1f7e249782499e66712ccf3abd854344bc5dc..0000000000000000000000000000000000000000
--- a/drivers/block/cciss_scsi.c
+++ /dev/null
@@ -1,1653 +0,0 @@
-/*
- *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- *    General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- *    02111-1307, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- *    Author: Stephen M. Cameron
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-/* Here we have code to present the driver as a scsi driver
-   as it is simultaneously presented as a block driver.  The
-   reason for doing this is to allow access to SCSI tape drives
-   through the array controller.  Note in particular, neither
-   physical nor logical disks are presented through the scsi layer. */
-
-#include <linux/timer.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <linux/atomic.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include "cciss_scsi.h"
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
-	size_t size,
-	__u8 page_code, unsigned char *scsi3addr,
-	int cmd_type);
-
-static CommandList_struct *cmd_alloc(ctlr_info_t *h);
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
-
-static int cciss_scsi_write_info(struct Scsi_Host *sh,
-		char *buffer, /* data buffer */
-		int length); 	   /* length of data in buffer */
-static int cciss_scsi_show_info(struct seq_file *m,
-				struct Scsi_Host *sh);
-
-static int cciss_scsi_queue_command (struct Scsi_Host *h,
-				     struct scsi_cmnd *cmd);
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
-static int cciss_eh_abort_handler(struct scsi_cmnd *);
-
-static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
-	{ .name = "cciss0", .ndevices = 0 },
-	{ .name = "cciss1", .ndevices = 0 },
-	{ .name = "cciss2", .ndevices = 0 },
-	{ .name = "cciss3", .ndevices = 0 },
-	{ .name = "cciss4", .ndevices = 0 },
-	{ .name = "cciss5", .ndevices = 0 },
-	{ .name = "cciss6", .ndevices = 0 },
-	{ .name = "cciss7", .ndevices = 0 },
-};
-
-static struct scsi_host_template cciss_driver_template = {
-	.module			= THIS_MODULE,
-	.name			= "cciss",
-	.proc_name		= "cciss",
-	.write_info		= cciss_scsi_write_info,
-	.show_info		= cciss_scsi_show_info,
-	.queuecommand		= cciss_scsi_queue_command,
-	.this_id		= 7,
-	.use_clustering		= DISABLE_CLUSTERING,
-	/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
-	.eh_device_reset_handler= cciss_eh_device_reset_handler,
-	.eh_abort_handler	= cciss_eh_abort_handler,
-};
-
-#pragma pack(1)
-
-#define SCSI_PAD_32 8
-#define SCSI_PAD_64 8
-
-struct cciss_scsi_cmd_stack_elem_t {
-	CommandList_struct cmd;
-	ErrorInfo_struct Err;
-	__u32 busaddr;
-	int cmdindex;
-	u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
-};
-
-#pragma pack()
-
-#pragma pack(1)
-struct cciss_scsi_cmd_stack_t {
-	struct cciss_scsi_cmd_stack_elem_t *pool;
-	struct cciss_scsi_cmd_stack_elem_t **elem;
-	dma_addr_t cmd_pool_handle;
-	int top;
-	int nelems;
-};
-#pragma pack()
-
-struct cciss_scsi_adapter_data_t {
-	struct Scsi_Host *scsi_host;
-	struct cciss_scsi_cmd_stack_t cmd_stack;
-	SGDescriptor_struct **cmd_sg_list;
-	int registered;
-	spinlock_t lock; // to protect ccissscsi[ctlr];
-};
-
-#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
-	&h->scsi_ctlr->lock, flags);
-#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
-	&h->scsi_ctlr->lock, flags);
-
-static CommandList_struct *
-scsi_cmd_alloc(ctlr_info_t *h)
-{
-	/* assume only one process in here at a time, locking done by caller. */
-	/* use h->lock */
-	/* might be better to rewrite how we allocate scsi commands in a way that */
-	/* needs no locking at all. */
-
-	/* take the top memory chunk off the stack and return it, if any. */
-	struct cciss_scsi_cmd_stack_elem_t *c;
-	struct cciss_scsi_adapter_data_t *sa;
-	struct cciss_scsi_cmd_stack_t *stk;
-	u64bit temp64;
-
-	sa = h->scsi_ctlr;
-	stk = &sa->cmd_stack;
-
-	if (stk->top < 0)
-		return NULL;
-	c = stk->elem[stk->top];
-	/* memset(c, 0, sizeof(*c)); */
-	memset(&c->cmd, 0, sizeof(c->cmd));
-	memset(&c->Err, 0, sizeof(c->Err));
-	/* set physical addr of cmd and addr of scsi parameters */
-	c->cmd.busaddr = c->busaddr;
-	c->cmd.cmdindex = c->cmdindex;
-	/* (__u32) (stk->cmd_pool_handle +
-		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
-
-	temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
-	/* (__u64) (stk->cmd_pool_handle +
-		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
-		 sizeof(CommandList_struct)); */
-	stk->top--;
-	c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
-	c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
-	c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
-
-	c->cmd.ctlr = h->ctlr;
-	c->cmd.err_info = &c->Err;
-
-	return (CommandList_struct *) c;
-}
-
-static void
-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
-	/* assume only one process in here at a time, locking done by caller. */
-	/* use h->lock */
-	/* drop the free memory chunk on top of the stack. */
-
-	struct cciss_scsi_adapter_data_t *sa;
-	struct cciss_scsi_cmd_stack_t *stk;
-
-	sa = h->scsi_ctlr;
-	stk = &sa->cmd_stack;
-	stk->top++;
-	if (stk->top >= stk->nelems) {
-		dev_err(&h->pdev->dev,
-			"scsi_cmd_free called too many times.\n");
-		BUG();
-	}
-	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
-}
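(scsi_cmd_alloc() and scsi_cmd_free() assume the caller holds h->lock,
as their comments note.  The wrapper below is a sketch of that pairing,
matching how cciss_scsi_do_inquiry() later takes the lock; the wrapper
name itself is made up.)

	/* Sketch: take h->lock around the unlocked stack allocator. */
	static CommandList_struct *scsi_cmd_alloc_locked(ctlr_info_t *h)
	{
		unsigned long flags;
		CommandList_struct *c;

		spin_lock_irqsave(&h->lock, flags);
		c = scsi_cmd_alloc(h);
		spin_unlock_irqrestore(&h->lock, flags);
		return c;
	}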
-
-static int
-scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
-{
-	int i;
-	struct cciss_scsi_cmd_stack_t *stk;
-	size_t size;
-
-	stk = &sa->cmd_stack;
-	stk->nelems = cciss_tape_cmds + 2;
-	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
-		h->chainsize, stk->nelems);
-	if (!sa->cmd_sg_list && h->chainsize > 0)
-		return -ENOMEM;
-
-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
-	/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
-	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
-	/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
-	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
-		pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
-
-	if (stk->pool == NULL) {
-		cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
-		sa->cmd_sg_list = NULL;
-		return -ENOMEM;
-	}
-	stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
-	if (!stk->elem) {
-		pci_free_consistent(h->pdev, size, stk->pool,
-		stk->cmd_pool_handle);
-		return -1;
-	}
-	for (i = 0; i < stk->nelems; i++) {
-		stk->elem[i] = &stk->pool[i];
-		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
-			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
-		stk->elem[i]->cmdindex = i;
-	}
-	stk->top = stk->nelems-1;
-	return 0;
-}
-
-static void
-scsi_cmd_stack_free(ctlr_info_t *h)
-{
-	struct cciss_scsi_adapter_data_t *sa;
-	struct cciss_scsi_cmd_stack_t *stk;
-	size_t size;
-
-	sa = h->scsi_ctlr;
-	stk = &sa->cmd_stack;
-	if (stk->top != stk->nelems-1) {
-		dev_warn(&h->pdev->dev,
-			"bug: %d scsi commands are still outstanding.\n",
-			stk->nelems - stk->top);
-	}
-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
-	pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
-	stk->pool = NULL;
-	cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
-	kfree(stk->elem);
-	stk->elem = NULL;
-}
-
-#if 0
-static void
-print_cmd(CommandList_struct *cp)
-{
-	printk("queue:%d\n", cp->Header.ReplyQueue);
-	printk("sglist:%d\n", cp->Header.SGList);
-	printk("sgtot:%d\n", cp->Header.SGTotal);
-	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
-			cp->Header.Tag.lower);
-	printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
-	printk("CDBLen:%d\n", cp->Request.CDBLen);
-	printk("Type:%d\n",cp->Request.Type.Type);
-	printk("Attr:%d\n",cp->Request.Type.Attribute);
-	printk(" Dir:%d\n",cp->Request.Type.Direction);
-	printk("Timeout:%d\n",cp->Request.Timeout);
-	printk("CDB: %16ph\n", cp->Request.CDB);
-	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n",
-		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
-			cp->ErrDesc.Len);
-	printk("sgs..........Errorinfo:\n");
-	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
-	printk("senselen:%d\n", cp->err_info->SenseLen);
-	printk("cmd status:%d\n", cp->err_info->CommandStatus);
-	printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
-	printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
-	printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
-	printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-}
-#endif
-
-static int
-find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
-{
-	/* finds an unused bus, target, lun for a new device */
-	/* assumes h->scsi_ctlr->lock is held */
-	int i, found=0;
-	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
-
-	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
-
-	target_taken[SELF_SCSI_ID] = 1;
-	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
-		target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
-
-	for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
-		if (!target_taken[i]) {
-			*bus = 0; *target=i; *lun = 0; found=1;
-			break;
-		}
-	}
-	return (!found);
-}
-struct scsi2map {
-	char scsi3addr[8];
-	int bus, target, lun;
-};
-
-static int
-cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
-		struct cciss_scsi_dev_t *device,
-		struct scsi2map *added, int *nadded)
-{
-	/* assumes h->scsi_ctlr->lock is held */
-	int n = ccissscsi[h->ctlr].ndevices;
-	struct cciss_scsi_dev_t *sd;
-	int i, bus, target, lun;
-	unsigned char addr1[8], addr2[8];
-
-	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
-		dev_warn(&h->pdev->dev, "Too many devices, "
-			"some will be inaccessible.\n");
-		return -1;
-	}
-
-	bus = target = -1;
-	lun = 0;
-	/* Is this device a non-zero lun of a multi-lun device */
-	/* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
-	if (device->scsi3addr[4] != 0) {
-		/* Search through our list and find the device which */
-		/* has the same 8 byte LUN address, excepting byte 4. */
-		/* Assign the same bus and target for this new LUN. */
-		/* Use the logical unit number from the firmware. */
-		memcpy(addr1, device->scsi3addr, 8);
-		addr1[4] = 0;
-		for (i = 0; i < n; i++) {
-			sd = &ccissscsi[h->ctlr].dev[i];
-			memcpy(addr2, sd->scsi3addr, 8);
-			addr2[4] = 0;
-			/* differ only in byte 4? */
-			if (memcmp(addr1, addr2, 8) == 0) {
-				bus = sd->bus;
-				target = sd->target;
-				lun = device->scsi3addr[4];
-				break;
-			}
-		}
-	}
-
-	sd = &ccissscsi[h->ctlr].dev[n];
-	if (lun == 0) {
-		if (find_bus_target_lun(h,
-			&sd->bus, &sd->target, &sd->lun) != 0)
-			return -1;
-	} else {
-		sd->bus = bus;
-		sd->target = target;
-		sd->lun = lun;
-	}
-	added[*nadded].bus = sd->bus;
-	added[*nadded].target = sd->target;
-	added[*nadded].lun = sd->lun;
-	(*nadded)++;
-
-	memcpy(sd->scsi3addr, device->scsi3addr, 8);
-	memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
-	memcpy(sd->revision, device->revision, sizeof(sd->revision));
-	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
-	sd->devtype = device->devtype;
-
-	ccissscsi[h->ctlr].ndevices++;
-
-	/* initially, (before registering with scsi layer) we don't
-	   know our hostno and we don't want to print anything first
-	   time anyway (the scsi layer's inquiries will show that info) */
-	if (hostno != -1)
-		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
-			scsi_device_type(sd->devtype), hostno,
-			sd->bus, sd->target, sd->lun);
-	return 0;
-}
-
-static void
-cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
-	struct scsi2map *removed, int *nremoved)
-{
-	/* assumes h->scsi_ctlr->lock is held */
-	int i;
-	struct cciss_scsi_dev_t sd;
-
-	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
-	sd = ccissscsi[h->ctlr].dev[entry];
-	removed[*nremoved].bus    = sd.bus;
-	removed[*nremoved].target = sd.target;
-	removed[*nremoved].lun    = sd.lun;
-	(*nremoved)++;
-	for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
-		ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
-	ccissscsi[h->ctlr].ndevices--;
-	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
-		scsi_device_type(sd.devtype), hostno,
-			sd.bus, sd.target, sd.lun);
-}
-
-
-#define SCSI3ADDR_EQ(a,b) ( \
-	(a)[7] == (b)[7] && \
-	(a)[6] == (b)[6] && \
-	(a)[5] == (b)[5] && \
-	(a)[4] == (b)[4] && \
-	(a)[3] == (b)[3] && \
-	(a)[2] == (b)[2] && \
-	(a)[1] == (b)[1] && \
-	(a)[0] == (b)[0])
-
-static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
-{
-	/* called when scsi_add_device fails in order to re-adjust */
-	/* ccissscsi[] to match the mid layer's view. */
-	unsigned long flags;
-	int i, j;
-	CPQ_TAPE_LOCK(h, flags);
-	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
-		if (memcmp(scsi3addr,
-				ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
-			for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
-				ccissscsi[h->ctlr].dev[j] =
-					ccissscsi[h->ctlr].dev[j+1];
-			ccissscsi[h->ctlr].ndevices--;
-			break;
-		}
-	}
-	CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
-	struct cciss_scsi_dev_t *dev2)
-{
-	return dev1->devtype == dev2->devtype &&
-		memcmp(dev1->scsi3addr, dev2->scsi3addr,
-			sizeof(dev1->scsi3addr)) == 0 &&
-		memcmp(dev1->device_id, dev2->device_id,
-			sizeof(dev1->device_id)) == 0 &&
-		memcmp(dev1->vendor, dev2->vendor,
-			sizeof(dev1->vendor)) == 0 &&
-		memcmp(dev1->model, dev2->model,
-			sizeof(dev1->model)) == 0 &&
-		memcmp(dev1->revision, dev2->revision,
-			sizeof(dev1->revision)) == 0;
-}
-
-static int
-adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
-	struct cciss_scsi_dev_t sd[], int nsds)
-{
-	/* sd contains scsi3 addresses and devtypes, but
-	   bus, target and lun are not filled in.  This function
-	   takes what's in sd to be the current and adjusts
-	   ccissscsi[] to be in line with what's in sd. */
-
-	int i,j, found, changes=0;
-	struct cciss_scsi_dev_t *csd;
-	unsigned long flags;
-	struct scsi2map *added, *removed;
-	int nadded, nremoved;
-	struct Scsi_Host *sh = NULL;
-
-	added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
-			GFP_KERNEL);
-	removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
-			GFP_KERNEL);
-
-	if (!added || !removed) {
-		dev_warn(&h->pdev->dev,
-			"Out of memory in adjust_cciss_scsi_table\n");
-		goto free_and_out;
-	}
-
-	CPQ_TAPE_LOCK(h, flags);
-
-	if (hostno != -1)  /* if it's not the first time... */
-		sh = h->scsi_ctlr->scsi_host;
-
-	/* find any devices in ccissscsi[] that are not in
-	   sd[] and remove them from ccissscsi[] */
-
-	i = 0;
-	nremoved = 0;
-	nadded = 0;
-	while (i < ccissscsi[h->ctlr].ndevices) {
-		csd = &ccissscsi[h->ctlr].dev[i];
-		found=0;
-		for (j=0;j<nsds;j++) {
-			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
-				csd->scsi3addr)) {
-				if (device_is_the_same(&sd[j], csd))
-					found=2;
-				else
-					found=1;
-				break;
-			}
-		}
-
-		if (found == 0) { /* device no longer present. */
-			changes++;
-			cciss_scsi_remove_entry(h, hostno, i,
-				removed, &nremoved);
-			/* remove ^^^, hence i not incremented */
-		} else if (found == 1) { /* device is different in some way */
-			changes++;
-			dev_info(&h->pdev->dev,
-				"device c%db%dt%dl%d has changed.\n",
-				hostno, csd->bus, csd->target, csd->lun);
-			cciss_scsi_remove_entry(h, hostno, i,
-				removed, &nremoved);
-			/* remove ^^^, hence i not incremented */
-			if (cciss_scsi_add_entry(h, hostno, &sd[j],
-				added, &nadded) != 0)
-				/* we just removed one, so add can't fail. */
-					BUG();
-			csd->devtype = sd[j].devtype;
-			memcpy(csd->device_id, sd[j].device_id,
-				sizeof(csd->device_id));
-			memcpy(csd->vendor, sd[j].vendor,
-				sizeof(csd->vendor));
-			memcpy(csd->model, sd[j].model,
-				sizeof(csd->model));
-			memcpy(csd->revision, sd[j].revision,
-				sizeof(csd->revision));
-		} else 		/* device is same as it ever was, */
-			i++;	/* so just move along. */
-	}
-
-	/* Now, make sure every device listed in sd[] is also
- 	   listed in ccissscsi[], adding them if they aren't found */
-
-	for (i=0;i<nsds;i++) {
-		found=0;
-		for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
-			csd = &ccissscsi[h->ctlr].dev[j];
-			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
-				csd->scsi3addr)) {
-				if (device_is_the_same(&sd[i], csd))
-					found=2;	/* found device */
-				else
-					found=1; 	/* found a bug. */
-				break;
-			}
-		}
-		if (!found) {
-			changes++;
-			if (cciss_scsi_add_entry(h, hostno, &sd[i],
-				added, &nadded) != 0)
-				break;
-		} else if (found == 1) {
-			/* should never happen... */
-			changes++;
-			dev_warn(&h->pdev->dev,
-				"device unexpectedly changed\n");
-			/* but if it does happen, we just ignore that device */
-		}
-	}
-	CPQ_TAPE_UNLOCK(h, flags);
-
-	/* Don't notify scsi mid layer of any changes the first time through */
-	/* (or if there are no changes) scsi_scan_host will do it later the */
-	/* first time through. */
-	if (hostno == -1 || !changes)
-		goto free_and_out;
-
-	/* Notify scsi mid layer of any removed devices */
-	for (i = 0; i < nremoved; i++) {
-		struct scsi_device *sdev =
-			scsi_device_lookup(sh, removed[i].bus,
-				removed[i].target, removed[i].lun);
-		if (sdev != NULL) {
-			scsi_remove_device(sdev);
-			scsi_device_put(sdev);
-		} else {
-			/* We don't expect to get here. */
-			/* future cmds to this device will get selection */
-			/* timeout as if the device was gone. */
-			dev_warn(&h->pdev->dev, "didn't find "
-				"c%db%dt%dl%d for removal.\n",
-				hostno, removed[i].bus,
-				removed[i].target, removed[i].lun);
-		}
-	}
-
-	/* Notify scsi mid layer of any added devices */
-	for (i = 0; i < nadded; i++) {
-		int rc;
-		rc = scsi_add_device(sh, added[i].bus,
-			added[i].target, added[i].lun);
-		if (rc == 0)
-			continue;
-		dev_warn(&h->pdev->dev, "scsi_add_device "
-			"c%db%dt%dl%d failed, device not added.\n",
-			hostno, added[i].bus, added[i].target, added[i].lun);
-		/* now we have to remove it from ccissscsi, */
-		/* since it didn't get added to scsi mid layer */
-		fixup_botched_add(h, added[i].scsi3addr);
-	}
-
-free_and_out:
-	kfree(added);
-	kfree(removed);
-	return 0;
-}
-
-static int
-lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
-{
-	int i;
-	struct cciss_scsi_dev_t *sd;
-	unsigned long flags;
-
-	CPQ_TAPE_LOCK(h, flags);
-	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
-		sd = &ccissscsi[h->ctlr].dev[i];
-		if (sd->bus == bus &&
-		    sd->target == target &&
-		    sd->lun == lun) {
-			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
-			CPQ_TAPE_UNLOCK(h, flags);
-			return 0;
-		}
-	}
-	CPQ_TAPE_UNLOCK(h, flags);
-	return -1;
-}
-
-static void
-cciss_scsi_setup(ctlr_info_t *h)
-{
-	struct cciss_scsi_adapter_data_t * shba;
-
-	ccissscsi[h->ctlr].ndevices = 0;
-	shba = kmalloc(sizeof(*shba), GFP_KERNEL);
-	if (shba == NULL)
-		return;
-	shba->scsi_host = NULL;
-	spin_lock_init(&shba->lock);
-	shba->registered = 0;
-	if (scsi_cmd_stack_setup(h, shba) != 0) {
-		kfree(shba);
-		shba = NULL;
-	}
-	h->scsi_ctlr = shba;
-	return;
-}
-
-static void complete_scsi_command(CommandList_struct *c, int timeout,
-	__u32 tag)
-{
-	struct scsi_cmnd *cmd;
-	ctlr_info_t *h;
-	ErrorInfo_struct *ei;
-
-	ei = c->err_info;
-
-	/* First, see if it was a message rather than a command */
-	if (c->Request.Type.Type == TYPE_MSG)  {
-		c->cmd_type = CMD_MSG_DONE;
-		return;
-	}
-
-	cmd = (struct scsi_cmnd *) c->scsi_cmd;
-	h = hba[c->ctlr];
-
-	scsi_dma_unmap(cmd);
-	if (c->Header.SGTotal > h->max_cmd_sgentries)
-		cciss_unmap_sg_chain_block(h, c);
-
-	cmd->result = (DID_OK << 16); 		/* host byte */
-	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
-	/* cmd->result |= (GOOD < 1); */		/* status byte */
-
-	cmd->result |= (ei->ScsiStatus);
-	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus);  */
-
-	/* copy the sense data whether we need to or not. */
-
-	memcpy(cmd->sense_buffer, ei->SenseInfo,
-		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-			SCSI_SENSE_BUFFERSIZE :
-			ei->SenseLen);
-	scsi_set_resid(cmd, ei->ResidualCnt);
-
-	if (ei->CommandStatus != 0) { /* an error has occurred */
-		switch (ei->CommandStatus) {
-			case CMD_TARGET_STATUS:
-				/* Pass it up to the upper layers... */
-				if (!ei->ScsiStatus) {
-
-	/* Ordinarily, this case should never happen, but there is a bug
-	   in some released firmware revisions that allows it to happen
-	   if, for example, a 4100 backplane loses power and the tape
-	   drive is in it.  We assume that it's a fatal error of some
-	   kind because we can't show that it wasn't. We will make it
-	   look like selection timeout since that is the most common
-	   reason for this to occur, and it's severe enough. */
-
-					cmd->result = DID_NO_CONNECT << 16;
-				}
-			break;
-			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
-			break;
-			case CMD_DATA_OVERRUN:
-				dev_warn(&h->pdev->dev, "%p has"
-					" completed with data overrun "
-					"reported\n", c);
-			break;
-			case CMD_INVALID: {
-				/*
-				print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
-				print_cmd(c);
-				 */
-     /* We get CMD_INVALID if you address a non-existent tape drive instead
-	of a selection timeout (no response).  You will see this if you yank
-	out a tape drive, then try to access it. This is kind of a shame
-	because it means that any other CMD_INVALID (e.g. driver bug) will
-	get interpreted as a missing target. */
-				cmd->result = DID_NO_CONNECT << 16;
-				}
-			break;
-			case CMD_PROTOCOL_ERR:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev,
-					"%p has protocol error\n", c);
-                        break;
-			case CMD_HARDWARE_ERR:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev,
-					"%p had hardware error\n", c);
-                        break;
-			case CMD_CONNECTION_LOST:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev,
-					"%p had connection lost\n", c);
-			break;
-			case CMD_ABORTED:
-				cmd->result = DID_ABORT << 16;
-				dev_warn(&h->pdev->dev, "%p was aborted\n", c);
-			break;
-			case CMD_ABORT_FAILED:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev,
-					"%p reports abort failed\n", c);
-			break;
-			case CMD_UNSOLICITED_ABORT:
-				cmd->result = DID_ABORT << 16;
-				dev_warn(&h->pdev->dev, "%p aborted due to an "
-					"unsolicited abort\n", c);
-			break;
-			case CMD_TIMEOUT:
-				cmd->result = DID_TIME_OUT << 16;
-				dev_warn(&h->pdev->dev, "%p timed out\n", c);
-			break;
-			case CMD_UNABORTABLE:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev, "c %p command "
-					"unabortable\n", c);
-			break;
-			default:
-				cmd->result = DID_ERROR << 16;
-				dev_warn(&h->pdev->dev,
-					"%p returned unknown status %x\n", c,
-						ei->CommandStatus);
-		}
-	}
-	cmd->scsi_done(cmd);
-	scsi_cmd_free(h, c);
-}
-
-static int
-cciss_scsi_detect(ctlr_info_t *h)
-{
-	struct Scsi_Host *sh;
-	int error;
-
-	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
-	if (sh == NULL)
-		goto fail;
-	sh->io_port = 0;	// good enough?  FIXME,
-	sh->n_io_port = 0;	// I don't think we use these two...
-	sh->this_id = SELF_SCSI_ID;
-	sh->can_queue = cciss_tape_cmds;
-	sh->sg_tablesize = h->maxsgentries;
-	sh->max_cmd_len = MAX_COMMAND_SIZE;
-	sh->max_sectors = h->cciss_max_sectors;
-
-	((struct cciss_scsi_adapter_data_t *)
-		h->scsi_ctlr)->scsi_host = sh;
-	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[SIMPLE_MODE_INT];
-	sh->unique_id = sh->irq;
-	error = scsi_add_host(sh, &h->pdev->dev);
-	if (error)
-		goto fail_host_put;
-	scsi_scan_host(sh);
-	return 1;
-
- fail_host_put:
-	scsi_host_put(sh);
- fail:
-	return 0;
-}
-
-static void
-cciss_unmap_one(struct pci_dev *pdev,
-		CommandList_struct *c,
-		size_t buflen,
-		int data_direction)
-{
-	u64bit addr64;
-
-	addr64.val32.lower = c->SG[0].Addr.lower;
-	addr64.val32.upper = c->SG[0].Addr.upper;
-	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
-}
-
-static void
-cciss_map_one(struct pci_dev *pdev,
-		CommandList_struct *c,
-		unsigned char *buf,
-		size_t buflen,
-		int data_direction)
-{
-	__u64 addr64;
-
-	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
-	c->SG[0].Addr.lower =
-	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
-	c->SG[0].Addr.upper =
-	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
-	c->SG[0].Len = buflen;
-	c->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
-	c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
-}
-
-static int
-cciss_scsi_do_simple_cmd(ctlr_info_t *h,
-			CommandList_struct *c,
-			unsigned char *scsi3addr,
-			unsigned char *cdb,
-			unsigned char cdblen,
-			unsigned char *buf, int bufsize,
-			int direction)
-{
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
-	c->scsi_cmd = NULL;
-	c->Header.ReplyQueue = 0;  /* unused in simple mode */
-	memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
-	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
-	// Fill in the request block...
-
-	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
-		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
-		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
-
-	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
-	memcpy(c->Request.CDB, cdb, cdblen);
-	c->Request.Timeout = 0;
-	c->Request.CDBLen = cdblen;
-	c->Request.Type.Type = TYPE_CMD;
-	c->Request.Type.Attribute = ATTR_SIMPLE;
-	c->Request.Type.Direction = direction;
-
-	/* Fill in the SG list and do dma mapping */
-	cciss_map_one(h->pdev, c, (unsigned char *) buf,
-			bufsize, DMA_FROM_DEVICE);
-
-	c->waiting = &wait;
-	enqueue_cmd_and_start_io(h, c);
-	wait_for_completion(&wait);
-
-	/* undo the dma mapping */
-	cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
-	return(0);
-}
-
-static void
-cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
-{
-	ErrorInfo_struct *ei;
-
-	ei = c->err_info;
-	switch (ei->CommandStatus) {
-		case CMD_TARGET_STATUS:
-			dev_warn(&h->pdev->dev,
-				"cmd %p has completed with errors\n", c);
-			dev_warn(&h->pdev->dev,
-				"cmd %p has SCSI Status = %x\n",
-				c, ei->ScsiStatus);
-			if (ei->ScsiStatus == 0)
-				dev_warn(&h->pdev->dev,
-				"SCSI status is abnormally zero.  "
-				"(probably indicates selection timeout "
-				"reported incorrectly due to a known "
-				"firmware bug, circa July, 2001.)\n");
-		break;
-		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
-			dev_info(&h->pdev->dev, "UNDERRUN\n");
-		break;
-		case CMD_DATA_OVERRUN:
-			dev_warn(&h->pdev->dev, "%p has"
-				" completed with data overrun "
-				"reported\n", c);
-		break;
-		case CMD_INVALID: {
-			/* controller unfortunately reports SCSI passthru's */
-			/* to non-existent targets as invalid commands. */
-			dev_warn(&h->pdev->dev,
-				"%p is reported invalid (probably means "
-				"target device no longer present)\n", c);
-			/*
-			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
-			print_cmd(c);
-			 */
-			}
-		break;
-		case CMD_PROTOCOL_ERR:
-			dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
-		break;
-		case CMD_HARDWARE_ERR:
-			/* cmd->result = DID_ERROR << 16; */
-			dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
-		break;
-		case CMD_CONNECTION_LOST:
-			dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
-		break;
-		case CMD_ABORTED:
-			dev_warn(&h->pdev->dev, "%p was aborted\n", c);
-		break;
-		case CMD_ABORT_FAILED:
-			dev_warn(&h->pdev->dev,
-				"%p reports abort failed\n", c);
-		break;
-		case CMD_UNSOLICITED_ABORT:
-			dev_warn(&h->pdev->dev,
-				"%p aborted due to an unsolicited abort\n", c);
-		break;
-		case CMD_TIMEOUT:
-			dev_warn(&h->pdev->dev, "%p timed out\n", c);
-		break;
-		case CMD_UNABORTABLE:
-			dev_warn(&h->pdev->dev,
-				"%p unabortable\n", c);
-		break;
-		default:
-			dev_warn(&h->pdev->dev,
-				"%p returned unknown status %x\n",
-				c, ei->CommandStatus);
-	}
-}
-
-static int
-cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
-	unsigned char page, unsigned char *buf,
-	unsigned char bufsize)
-{
-	int rc;
-	CommandList_struct *c;
-	char cdb[6];
-	ErrorInfo_struct *ei;
-	unsigned long flags;
-
-	spin_lock_irqsave(&h->lock, flags);
-	c = scsi_cmd_alloc(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-
-	if (c == NULL) {			/* trouble... */
-		printk("scsi_cmd_alloc returned NULL!\n");
-		return -1;
-	}
-
-	ei = c->err_info;
-
-	cdb[0] = CISS_INQUIRY;
-	cdb[1] = (page != 0);
-	cdb[2] = page;
-	cdb[3] = 0;
-	cdb[4] = bufsize;
-	cdb[5] = 0;
-	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
-				6, buf, bufsize, XFER_READ);
-
-	if (rc != 0) return rc; /* something went wrong */
-
-	if (ei->CommandStatus != 0 &&
-	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		cciss_scsi_interpret_error(h, c);
-		rc = -1;
-	}
-	spin_lock_irqsave(&h->lock, flags);
-	scsi_cmd_free(h, c);
-	spin_unlock_irqrestore(&h->lock, flags);
-	return rc;
-}
-
-/* Get the device id from inquiry page 0x83 */
-static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
-	unsigned char *device_id, int buflen)
-{
-	int rc;
-	unsigned char *buf;
-
-	if (buflen > 16)
-		buflen = 16;
-	buf = kzalloc(64, GFP_KERNEL);
-	if (!buf)
-		return -1;
-	rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
-	if (rc == 0)
-		memcpy(device_id, &buf[8], buflen);
-	kfree(buf);
-	return rc != 0;
-}
-
-static int
-cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
-		ReportLunData_struct *buf, int bufsize)
-{
-	int rc;
-	CommandList_struct *c;
-	unsigned char cdb[12];
-	unsigned char scsi3addr[8];
-	ErrorInfo_struct *ei;
-	unsigned long flags;
-
-	spin_lock_irqsave(&h->lock, flags);
-	c = scsi_cmd_alloc(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (c == NULL) {			/* trouble... */
-		printk("scsi_cmd_alloc returned NULL!\n");
-		return -1;
-	}
-
-	memset(&scsi3addr[0], 0, 8); /* address the controller */
-	cdb[0] = CISS_REPORT_PHYS;
-	cdb[1] = 0;
-	cdb[2] = 0;
-	cdb[3] = 0;
-	cdb[4] = 0;
-	cdb[5] = 0;
-	cdb[6] = (bufsize >> 24) & 0xFF;  //MSB
-	cdb[7] = (bufsize >> 16) & 0xFF;
-	cdb[8] = (bufsize >> 8) & 0xFF;
-	cdb[9] = bufsize & 0xFF;
-	cdb[10] = 0;
-	cdb[11] = 0;
-
-	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
-				cdb, 12,
-				(unsigned char *) buf,
-				bufsize, XFER_READ);
-
-	if (rc != 0) return rc; /* something went wrong */
-
-	ei = c->err_info;
-	if (ei->CommandStatus != 0 &&
-	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		cciss_scsi_interpret_error(h, c);
-		rc = -1;
-	}
-	spin_lock_irqsave(&h->lock, flags);
-	scsi_cmd_free(h, c);
-	spin_unlock_irqrestore(&h->lock, flags);
-	return rc;
-}
-
-static void
-cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
-{
-	/* The idea here is that we could get notified from /proc
-	   that some devices have changed, so we do a report
-	   physical luns cmd, and adjust our list of devices
-	   accordingly.  (We can't rely on the scsi mid-layer just
-	   doing inquiries, because the "busses" that the scsi
-	   mid-layer probes are totally fabricated by this driver,
-	   so new devices wouldn't show up.)
-
-	   The scsi3addr's of devices won't change so long as the
-	   adapter is not reset.  That means we can rescan and
-	   tell which devices we already know about, vs. new
-	   devices, vs. disappearing devices.
-
-	   Also, if you yank out a tape drive, then put a disk in
-	   its place (say, a configured volume from another array
-	   controller, for instance), _don't_ poke this driver
-	   (so it thinks it's still a tape), but _do_ poke the scsi
-	   mid layer, so it does an inquiry... the scsi mid layer
-	   will see the physical disk.  This would be bad.  Need to
-	   think about how to prevent that.  One idea would be to
-	   snoop all scsi responses and if an inquiry response comes
-	   back that reports a disk, chuck it and return selection
-	   timeout instead and adjust our table...  Not sure I like
-	   that though.
-
-	 */
-#define OBDR_TAPE_INQ_SIZE 49
-#define OBDR_TAPE_SIG "$DR-10"
-	ReportLunData_struct *ld_buff;
-	unsigned char *inq_buff;
-	unsigned char scsi3addr[8];
-	__u32 num_luns=0;
-	unsigned char *ch;
-	struct cciss_scsi_dev_t *currentsd, *this_device;
-	int ncurrent=0;
-	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
-	int i;
-
-	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
-	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
-	currentsd = kzalloc(sizeof(*currentsd) *
-			(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
-	if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
-		printk(KERN_ERR "cciss: out of memory\n");
-		goto out;
-	}
-	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
-	if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
-		ch = &ld_buff->LUNListLength[0];
-		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
-		if (num_luns > CISS_MAX_PHYS_LUN) {
-			printk(KERN_WARNING
-				"cciss: Maximum physical LUNs (%d) exceeded.  "
-				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
-				num_luns - CISS_MAX_PHYS_LUN);
-			num_luns = CISS_MAX_PHYS_LUN;
-		}
-	}
-	else {
-		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
-		goto out;
-	}
-
-
-	/* adjust our table of devices */
-	for (i = 0; i < num_luns; i++) {
-		/* for each physical lun, do an inquiry */
-		if (ld_buff->LUN[i][3] & 0xC0) continue;
-		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
-		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
-
-		if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
-			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
-			/* Inquiry failed (msg printed already) */
-			continue; /* so we will skip this device. */
-
-		this_device->devtype = (inq_buff[0] & 0x1f);
-		this_device->bus = -1;
-		this_device->target = -1;
-		this_device->lun = -1;
-		memcpy(this_device->scsi3addr, scsi3addr, 8);
-		memcpy(this_device->vendor, &inq_buff[8],
-			sizeof(this_device->vendor));
-		memcpy(this_device->model, &inq_buff[16],
-			sizeof(this_device->model));
-		memcpy(this_device->revision, &inq_buff[32],
-			sizeof(this_device->revision));
-		memset(this_device->device_id, 0,
-			sizeof(this_device->device_id));
-		cciss_scsi_get_device_id(h, scsi3addr,
-			this_device->device_id, sizeof(this_device->device_id));
-
-		switch (this_device->devtype) {
-		  case 0x05: /* CD-ROM */ {
-
-			/* We don't *really* support actual CD-ROM devices,
-			 * just this "One Button Disaster Recovery" tape drive
-			 * which temporarily pretends to be a CD-ROM drive.
-			 * So we check that the device is really an OBDR tape
-			 * device by checking for "$DR-10" in bytes 43-48 of
-			 * the inquiry data.
-			 */
-				char obdr_sig[7];
-
-				strncpy(obdr_sig, &inq_buff[43], 6);
-				obdr_sig[6] = '\0';
-				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
-					/* Not OBDR device, ignore it. */
-					break;
-			}
-			/* fall through . . . */
-		  case 0x01: /* sequential access, (tape) */
-		  case 0x08: /* medium changer */
-			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
-				printk(KERN_INFO "cciss%d: %s ignored, "
-					"too many devices.\n", h->ctlr,
-					scsi_device_type(this_device->devtype));
-				break;
-			}
-			currentsd[ncurrent] = *this_device;
-			ncurrent++;
-			break;
-		  default:
-			break;
-		}
-	}
-
-	adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
-out:
-	kfree(inq_buff);
-	kfree(ld_buff);
-	kfree(currentsd);
-	return;
-}
-
-static int
-is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
-{
-	int verb_len = strlen(verb);
-	if (len >= verb_len && !memcmp(verb,ptr,verb_len))
-		return verb_len;
-	else
-		return 0;
-}
-
-static int
-cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
-{
-	int arg_len;
-
-	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
-		cciss_update_non_disk_devices(h, hostno);
-	else
-		return -EINVAL;
-	return length;
-}
-
-static int
-cciss_scsi_write_info(struct Scsi_Host *sh,
-		char *buffer, /* data buffer */
-		int length) 	   /* length of data in buffer */
-{
-	ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
-	if (h == NULL)  /* This really shouldn't ever happen. */
-		return -EINVAL;
-
-	return cciss_scsi_user_command(h, sh->host_no,
-			buffer, length);
-}
-
-static int
-cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh)
-{
-
-	ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
-	int i;
-
-	if (h == NULL)  /* This really shouldn't ever happen. */
-		return -EINVAL;
-
-	seq_printf(m, "cciss%d: SCSI host: %d\n",
-			h->ctlr, sh->host_no);
-
-	/* this information is needed by apps to know which cciss
-	   device corresponds to which scsi host number without
-	   having to open a scsi target device node.  The device
-	   information is not a duplicate of /proc/scsi/scsi because
-	   the two may be out of sync due to scsi hotplug; rather,
-	   this info gives an app what it needs to get the two back
-	   in sync. */
-
-	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
-		struct cciss_scsi_dev_t *sd =
-			&ccissscsi[h->ctlr].dev[i];
-		seq_printf(m, "c%db%dt%dl%d %02d "
-			"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			sh->host_no, sd->bus, sd->target, sd->lun,
-			sd->devtype,
-			sd->scsi3addr[0], sd->scsi3addr[1],
-			sd->scsi3addr[2], sd->scsi3addr[3],
-			sd->scsi3addr[4], sd->scsi3addr[5],
-			sd->scsi3addr[6], sd->scsi3addr[7]);
-	}
-	return 0;
-}
-
-/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
-   dma mapping  and fills in the scatter gather entries of the
-   cciss command, c. */
-
-static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
-	struct scsi_cmnd *cmd)
-{
-	unsigned int len;
-	struct scatterlist *sg;
-	__u64 addr64;
-	int request_nsgs, i, chained, sg_index;
-	struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
-	SGDescriptor_struct *curr_sg;
-
-	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
-
-	chained = 0;
-	sg_index = 0;
-	curr_sg = c->SG;
-	request_nsgs = scsi_dma_map(cmd);
-	if (request_nsgs) {
-		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
-			if (sg_index + 1 == h->max_cmd_sgentries &&
-				!chained && request_nsgs - i > 1) {
-				chained = 1;
-				sg_index = 0;
-				curr_sg = sa->cmd_sg_list[c->cmdindex];
-			}
-			addr64 = (__u64) sg_dma_address(sg);
-			len  = sg_dma_len(sg);
-			curr_sg[sg_index].Addr.lower =
-				(__u32) (addr64 & 0x0FFFFFFFFULL);
-			curr_sg[sg_index].Addr.upper =
-				(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
-			curr_sg[sg_index].Len = len;
-			curr_sg[sg_index].Ext = 0;
-			++sg_index;
-		}
-		if (chained)
-			cciss_map_sg_chain_block(h, c,
-				sa->cmd_sg_list[c->cmdindex],
-				(request_nsgs - (h->max_cmd_sgentries - 1)) *
-					sizeof(SGDescriptor_struct));
-	}
-	/* track how many SG entries we are using */
-	if (request_nsgs > h->maxSG)
-		h->maxSG = request_nsgs;
-	c->Header.SGTotal = (u16) request_nsgs + chained;
-	if (request_nsgs > h->max_cmd_sgentries)
-		c->Header.SGList = h->max_cmd_sgentries;
-	else
-		c->Header.SGList = c->Header.SGTotal;
-	return;
-}
-
-
-static int
-cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
-	ctlr_info_t *h;
-	int rc;
-	unsigned char scsi3addr[8];
-	CommandList_struct *c;
-	unsigned long flags;
-
-	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
-	// We violate cmd->host privacy here.  (Is there another way?)
-	h = (ctlr_info_t *) cmd->device->host->hostdata[0];
-
-	rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
-			cmd->device->lun, scsi3addr);
-	if (rc != 0) {
-		/* the scsi nexus does not match any that we presented... */
-		/* pretend to mid layer that we got selection timeout */
-		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
-		/* we might want to think about registering the controller
-		   itself as a processor device on the bus so sg binds to it. */
-		return 0;
-	}
-
-	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
-           see what the device thinks of it. */
-
-	spin_lock_irqsave(&h->lock, flags);
-	c = scsi_cmd_alloc(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-	if (c == NULL) {			/* trouble... */
-		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
-		/* FIXME: next 3 lines are -> BAD! <- */
-		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
-		return 0;
-	}
-
-	// Fill in the command list header
-
-	cmd->scsi_done = done;    // save this for use by completion code
-
-	/* save c in case we have to abort it */
-	cmd->host_scribble = (unsigned char *) c;
-
-	c->cmd_type = CMD_SCSI;
-	c->scsi_cmd = cmd;
-	c->Header.ReplyQueue = 0;  /* unused in simple mode */
-	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
-	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
-
-	// Fill in the request block...
-
-	c->Request.Timeout = 0;
-	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
-	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
-	c->Request.CDBLen = cmd->cmd_len;
-	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
-	c->Request.Type.Type = TYPE_CMD;
-	c->Request.Type.Attribute = ATTR_SIMPLE;
-	switch (cmd->sc_data_direction) {
-	  case DMA_TO_DEVICE:
-		c->Request.Type.Direction = XFER_WRITE;
-		break;
-	  case DMA_FROM_DEVICE:
-		c->Request.Type.Direction = XFER_READ;
-		break;
-	  case DMA_NONE:
-		c->Request.Type.Direction = XFER_NONE;
-		break;
-	  case DMA_BIDIRECTIONAL:
-		// This can happen if a buggy application does a scsi passthru
-		// and sets both inlen and outlen to non-zero. ( see
-		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
-
-		c->Request.Type.Direction = XFER_RSVD;
-		// This is technically wrong, and cciss controllers should
-		// reject it with CMD_INVALID, which is the most correct
-		// response, but non-fibre backends appear to let it
-		// slide by, and give the same results as if this field
-		// were set correctly.  Either way is acceptable for
-		// our purposes here.
-
-		break;
-
-	  default:
-		dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
-			cmd->sc_data_direction);
-		BUG();
-		break;
-	}
-	cciss_scatter_gather(h, c, cmd);
-	enqueue_cmd_and_start_io(h, c);
-	/* the cmd'll come back via intr handler in complete_scsi_command()  */
-	return 0;
-}
-
-static DEF_SCSI_QCMD(cciss_scsi_queue_command)
-
-static void cciss_unregister_scsi(ctlr_info_t *h)
-{
-	struct cciss_scsi_adapter_data_t *sa;
-	struct cciss_scsi_cmd_stack_t *stk;
-	unsigned long flags;
-
-	/* we are being forcibly unloaded, and may not refuse. */
-
-	spin_lock_irqsave(&h->lock, flags);
-	sa = h->scsi_ctlr;
-	stk = &sa->cmd_stack;
-
-	/* if we weren't ever actually registered, don't unregister */
-	if (sa->registered) {
-		spin_unlock_irqrestore(&h->lock, flags);
-		scsi_remove_host(sa->scsi_host);
-		scsi_host_put(sa->scsi_host);
-		spin_lock_irqsave(&h->lock, flags);
-	}
-
-	/* set scsi_host to NULL so our detect routine will
-	   find us on register */
-	sa->scsi_host = NULL;
-	spin_unlock_irqrestore(&h->lock, flags);
-	scsi_cmd_stack_free(h);
-	kfree(sa);
-}
-
-static int cciss_engage_scsi(ctlr_info_t *h)
-{
-	struct cciss_scsi_adapter_data_t *sa;
-	struct cciss_scsi_cmd_stack_t *stk;
-	unsigned long flags;
-
-	spin_lock_irqsave(&h->lock, flags);
-	sa = h->scsi_ctlr;
-	stk = &sa->cmd_stack;
-
-	if (sa->registered) {
-		dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
-		spin_unlock_irqrestore(&h->lock, flags);
-		return -ENXIO;
-	}
-	sa->registered = 1;
-	spin_unlock_irqrestore(&h->lock, flags);
-	cciss_update_non_disk_devices(h, -1);
-	cciss_scsi_detect(h);
-	return 0;
-}
-
-static void
-cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
-{
-	unsigned long flags;
-
-	CPQ_TAPE_LOCK(h, flags);
-	seq_printf(seq,
-		"Sequential access devices: %d\n\n",
-			ccissscsi[h->ctlr].ndevices);
-	CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int wait_for_device_to_become_ready(ctlr_info_t *h,
-	unsigned char lunaddr[])
-{
-	int rc;
-	int count = 0;
-	int waittime = HZ;
-	CommandList_struct *c;
-
-	c = cmd_alloc(h);
-	if (!c) {
-		dev_warn(&h->pdev->dev, "out of memory in "
-			"wait_for_device_to_become_ready.\n");
-		return IO_ERROR;
-	}
-
-	/* Send test unit ready until device ready, or give up. */
-	while (count < 20) {
-
-		/* Wait for a bit.  do this first, because if we send
-		 * the TUR right away, the reset will just abort it.
-		 */
-		schedule_timeout_uninterruptible(waittime);
-		count++;
-
-		/* Increase wait time with each try, up to a point. */
-		if (waittime < (HZ * 30))
-			waittime = waittime * 2;
-
-		/* Send the Test Unit Ready */
-		rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
-			lunaddr, TYPE_CMD);
-		if (rc == 0)
-			rc = sendcmd_withirq_core(h, c, 0);
-
-		(void) process_sendcmd_error(h, c);
-
-		if (rc != 0)
-			goto retry_tur;
-
-		if (c->err_info->CommandStatus == CMD_SUCCESS)
-			break;
-
-		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
-			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
-			if (c->err_info->SenseInfo[2] == NO_SENSE)
-				break;
-			if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
-				unsigned char asc;
-				asc = c->err_info->SenseInfo[12];
-				check_for_unit_attention(h, c);
-				if (asc == POWER_OR_RESET)
-					break;
-			}
-		}
-retry_tur:
-		dev_warn(&h->pdev->dev, "Waiting %d secs "
-			"for device to become ready.\n",
-			waittime / HZ);
-		rc = 1; /* device not ready. */
-	}
-
-	if (rc)
-		dev_warn(&h->pdev->dev, "giving up on device.\n");
-	else
-		dev_warn(&h->pdev->dev, "device is ready.\n");
-
-	cmd_free(h, c);
-	return rc;
-}
-
-/* Need at least one of these error handlers to keep ../scsi/hosts.c from
- * complaining.  Doing a host- or bus-reset can't do anything good here.
- * Despite what it might say in scsi_error.c, there may well be commands
- * on the controller, as the cciss driver registers twice, once as a block
- * device for the logical drives, and once as a scsi device, for any tape
- * drives.  So we know there are no commands out on the tape drives, but we
- * don't know there are no commands on the controller, and it is likely
- * that there are, as the cciss block device is most commonly used
- * as a boot device (embedded controller on HP/Compaq systems.)
-*/
-
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
-	int rc;
-	CommandList_struct *cmd_in_trouble;
-	unsigned char lunaddr[8];
-	ctlr_info_t *h;
-
-	/* find the controller to which the command to be aborted was sent */
-	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
-	if (h == NULL) /* paranoia */
-		return FAILED;
-	dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
-	/* find the command that's giving us trouble */
-	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
-	if (cmd_in_trouble == NULL) /* paranoia */
-		return FAILED;
-	memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
-	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
-		TYPE_MSG);
-	if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
-		return SUCCESS;
-	dev_warn(&h->pdev->dev, "resetting device failed.\n");
-	return FAILED;
-}
-
-static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
-{
-	int rc;
-	CommandList_struct *cmd_to_abort;
-	unsigned char lunaddr[8];
-	ctlr_info_t *h;
-
-	/* find the controller to which the command to be aborted was sent */
-	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
-	if (h == NULL) /* paranoia */
-		return FAILED;
-	dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
-
-	/* find the command to be aborted */
-	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
-	if (cmd_to_abort == NULL) /* paranoia */
-		return FAILED;
-	memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
-	rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
-		0, 0, lunaddr, TYPE_MSG);
-	if (rc == 0)
-		return SUCCESS;
-	return FAILED;
-
-}
-
-#else /* no CONFIG_CISS_SCSI_TAPE */
-
-/* If no tape support, then these become defined out of existence */
-
-#define cciss_scsi_setup(cntl_num)
-#define cciss_engage_scsi(h)
-
-#endif /* CONFIG_CISS_SCSI_TAPE */
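[Editorial note, not part of the patch: the cciss_update_non_disk_devices() code removed above does two things worth seeing in isolation — it decodes the big-endian LUNListLength field of the REPORT PHYSICAL LUNS response into a LUN count, and it recognizes an OBDR tape drive by the "$DR-10" signature at byte 43 of the inquiry data. The standalone C sketch below restates just those two steps; the helper names are illustrative and do not exist in the driver.]

/*
 * Standalone sketch of the decoding done by the removed scan code.
 * Compiles as ordinary user-space C; not kernel code.
 */
#include <string.h>

static unsigned int obdr_lun_count(const unsigned char *lun_list_length)
{
	/* 4-byte big-endian length in bytes; each LUN entry is 8 bytes */
	unsigned int len = ((unsigned int)lun_list_length[0] << 24) |
			   ((unsigned int)lun_list_length[1] << 16) |
			   ((unsigned int)lun_list_length[2] << 8) |
			    (unsigned int)lun_list_length[3];
	return len / 8;
}

static int is_obdr_tape(const unsigned char *inq_buff)
{
	/* OBDR devices report themselves as CD-ROMs but carry "$DR-10"
	 * in bytes 43-48 of the standard inquiry data. */
	return memcmp(&inq_buff[43], "$DR-10", 6) == 0;
}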
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
deleted file mode 100644
index e71d986727cac7c4a11b0e2f2b6c568ec4f2e829..0000000000000000000000000000000000000000
--- a/drivers/block/cciss_scsi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- *    General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- *    02111-1307, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-#ifndef _CCISS_SCSI_H_
-#define _CCISS_SCSI_H_
-
-#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
-
-		/* the scsi id of the adapter... */
-#define SELF_SCSI_ID 15
-		/* 15 is somewhat arbitrary, since the scsi-2 bus
-		   that's presented by the driver to the OS is
-		   fabricated.  The "real" scsi-3 bus the
-		   hardware presents is fabricated too.
-		   The actual, honest-to-goodness physical
-		   bus that the devices are attached to is not
-		   addressable natively, and may in fact turn
-		   out to be not scsi at all. */
-
-
-/* 
-
-If the upper scsi layer tries to track how many commands we have 
-outstanding, it will be operating under the misapprehension that it is
-the only one sending us requests.  We also have the block interface,
-which is where most requests must surely come from, so the upper layer's
-notion of how many requests we have outstanding will be wrong most or
-all of the time. 
-
-Note, the normal SCSI mid-layer error handling doesn't work well
-for this driver because 1) it takes the io_request_lock before
-calling error handlers and uses a local variable to store flags,
-so the io_request_lock cannot be released and interrupts enabled
-inside the error handlers, and 2) the error handlers cannot poll
-for command completion because they might get commands from the
-block half of the driver completing, and not know what to do
-with them.  That's what we get for making a hybrid scsi/block
-driver, I suppose.
-
-*/
-
-struct cciss_scsi_dev_t {
-	int devtype;
-	int bus, target, lun;		/* as presented to the OS */
-	unsigned char scsi3addr[8];	/* as presented to the HW */
-	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
-	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
-	unsigned char model[16];	/* bytes 16-31 of inquiry data */
-	unsigned char revision[4];	/* bytes 32-35 of inquiry data */
-};
-
-struct cciss_scsi_hba_t {
-	char *name;
-	int ndevices;
-#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
-	struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
-};
-
-#endif /* _CCISS_SCSI_H_ */
-#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 62cff5afc6bdc48d3a8b20b6ae38d3f4da77e139..84eab28665f37c148407a9bdeb99adccac5f788c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2079,7 +2079,7 @@ void
 mpt_detach(struct pci_dev *pdev)
 {
 	MPT_ADAPTER 	*ioc = pci_get_drvdata(pdev);
-	char pname[32];
+	char pname[64];
 	u8 cb_idx;
 	unsigned long flags;
 	struct workqueue_struct *wq;
@@ -2100,11 +2100,11 @@ mpt_detach(struct pci_dev *pdev)
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 	destroy_workqueue(wq);
 
-	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
+	snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
 	remove_proc_entry(pname, NULL);
-	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
+	snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
 	remove_proc_entry(pname, NULL);
-	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
+	snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
 	remove_proc_entry(pname, NULL);
 
 	/* call per device driver remove entry point */
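[Editorial note, not part of the patch: the mpt_detach() hunk above grows pname to 64 bytes and swaps sprintf for snprintf, which bounds the write and always NUL-terminates instead of overrunning the buffer when ioc->name is long. A minimal user-space illustration follows; the "/proc/mpt/%s/summary" format string is only an example standing in for MPT_PROCFS_MPTBASEDIR.]

#include <stdio.h>

int main(void)
{
	char pname[64];
	const char *ioc_name = "ioc0";

	/* snprintf never writes past sizeof(pname); at worst it truncates */
	snprintf(pname, sizeof(pname), "/proc/mpt/%s/summary", ioc_name);
	puts(pname);
	return 0;
}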
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d065062240bce109c2e177bb76c61c21ce47d11e..6d461ca97150553cba7ddcd880571247af1a50c0 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -104,7 +104,6 @@ static void mptfc_remove(struct pci_dev *pdev);
 static int mptfc_abort(struct scsi_cmnd *SCpnt);
 static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
 static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
-static int mptfc_host_reset(struct scsi_cmnd *SCpnt);
 
 static struct scsi_host_template mptfc_driver_template = {
 	.module				= THIS_MODULE,
@@ -123,7 +122,7 @@ static struct scsi_host_template mptfc_driver_template = {
 	.eh_abort_handler		= mptfc_abort,
 	.eh_device_reset_handler	= mptfc_dev_reset,
 	.eh_bus_reset_handler		= mptfc_bus_reset,
-	.eh_host_reset_handler		= mptfc_host_reset,
+	.eh_host_reset_handler		= mptscsih_host_reset,
 	.bios_param			= mptscsih_bios_param,
 	.can_queue			= MPT_FC_CAN_QUEUE,
 	.this_id			= -1,
@@ -254,13 +253,6 @@ mptfc_bus_reset(struct scsi_cmnd *SCpnt)
 	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
 }
 
-static int
-mptfc_host_reset(struct scsi_cmnd *SCpnt)
-{
-	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
-}
-
 static void
 mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f6308ad35b19774d64fcdd008466ac458a185f61..345f6035599eaf91e20032fd1f5d819b9549729e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2210,33 +2210,26 @@ mptsas_get_bay_identifier(struct sas_rphy *rphy)
 	return rc;
 }
 
-static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-			      struct request *req)
+static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy)
 {
 	MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
 	MPT_FRAME_HDR *mf;
 	SmpPassthroughRequest_t *smpreq;
-	struct request *rsp = req->next_rq;
-	int ret;
 	int flagsLength;
 	unsigned long timeleft;
 	char *psge;
-	dma_addr_t dma_addr_in = 0;
-	dma_addr_t dma_addr_out = 0;
 	u64 sas_address = 0;
-
-	if (!rsp) {
-		printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
-		    ioc->name, __func__);
-		return -EINVAL;
-	}
+	unsigned int reslen = 0;
+	int ret = -EINVAL;
 
 	/* do we need to support multiple segments? */
-	if (bio_multiple_segments(req->bio) ||
-	    bio_multiple_segments(rsp->bio)) {
+	if (job->request_payload.sg_cnt > 1 ||
+	    job->reply_payload.sg_cnt > 1) {
 		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
-		    ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
-		return -EINVAL;
+		    ioc->name, __func__, job->request_payload.payload_len,
+		    job->reply_payload.payload_len);
+		goto out;
 	}
 
 	ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
@@ -2252,7 +2245,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	smpreq = (SmpPassthroughRequest_t *)mf;
 	memset(smpreq, 0, sizeof(*smpreq));
 
-	smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+	smpreq->RequestDataLength =
+		cpu_to_le16(job->request_payload.payload_len - 4);
 	smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
 
 	if (rphy)
@@ -2278,13 +2272,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		       MPI_SGE_FLAGS_END_OF_BUFFER |
 		       MPI_SGE_FLAGS_DIRECTION)
 		       << MPI_SGE_FLAGS_SHIFT;
-	flagsLength |= (blk_rq_bytes(req) - 4);
 
-	dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
-				      blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out))
+	if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list,
+			1, PCI_DMA_BIDIRECTIONAL))
 		goto put_mf;
-	ioc->add_sge(psge, flagsLength, dma_addr_out);
+
+	flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4);
+	ioc->add_sge(psge, flagsLength,
+			sg_dma_address(job->request_payload.sg_list));
 	psge += ioc->SGE_size;
 
 	/* response */
@@ -2294,12 +2289,13 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		MPI_SGE_FLAGS_END_OF_BUFFER;
 
 	flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
-	flagsLength |= blk_rq_bytes(rsp) + 4;
-	dma_addr_in =  pci_map_single(ioc->pcidev, bio_data(rsp->bio),
-				      blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in))
-		goto unmap;
-	ioc->add_sge(psge, flagsLength, dma_addr_in);
+
+	if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list,
+			1, PCI_DMA_BIDIRECTIONAL))
+		goto unmap_out;
+	flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4;
+	ioc->add_sge(psge, flagsLength,
+			sg_dma_address(job->reply_payload.sg_list));
 
 	INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
 	mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
@@ -2310,10 +2306,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		mpt_free_msg_frame(ioc, mf);
 		mf = NULL;
 		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
-			goto unmap;
+			goto unmap_in;
 		if (!timeleft)
 			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
-		goto unmap;
+		goto unmap_in;
 	}
 	mf = NULL;
 
@@ -2321,23 +2317,22 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		SmpPassthroughReply_t *smprep;
 
 		smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
-		memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
-		scsi_req(req)->sense_len = sizeof(*smprep);
-		scsi_req(req)->resid_len = 0;
-		scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
+		memcpy(job->reply, smprep, sizeof(*smprep));
+		job->reply_len = sizeof(*smprep);
+		reslen = smprep->ResponseDataLength;
 	} else {
 		printk(MYIOC_s_ERR_FMT
 		    "%s: smp passthru reply failed to be returned\n",
 		    ioc->name, __func__);
 		ret = -ENXIO;
 	}
-unmap:
-	if (dma_addr_out)
-		pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
-				 PCI_DMA_BIDIRECTIONAL);
-	if (dma_addr_in)
-		pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
-				 PCI_DMA_BIDIRECTIONAL);
+
+unmap_in:
+	dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1,
+			PCI_DMA_BIDIRECTIONAL);
+unmap_out:
+	dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1,
+			PCI_DMA_BIDIRECTIONAL);
 put_mf:
 	if (mf)
 		mpt_free_msg_frame(ioc, mf);
@@ -2345,7 +2340,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
 	mutex_unlock(&ioc->sas_mgmt.mutex);
 out:
-	return ret;
+	bsg_job_done(job, ret, reslen);
 }
 
 static struct sas_function_template mptsas_transport_functions = {
@@ -4352,11 +4347,10 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
 			return;
 
 		phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
-		/* Only For SATA Device ADD */
-		if (!phy_info && (sas_device.device_info &
-				MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
+		/* Device hot plug */
+		if (!phy_info) {
 			devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				"%s %d SATA HOT PLUG: "
+				"%s %d HOT PLUG: "
 				"parent handle of device %x\n", ioc->name,
 				__func__, __LINE__, sas_device.handle_parent));
 			port_info = mptsas_find_portinfo_by_handle(ioc,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3dfd4c4228d5c34286a9f33284b12946773..82ac331d91254e72debc5d021808a6282e93dc1b 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -29,7 +29,6 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/miscdevice.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36ec8a751326062e47abe80ddd1f61a5f43b..8227076c9cbbf7de9915aa5dabad4b370f81061d 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -113,8 +113,12 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct fsf_status_read_buffer *srb = req->data;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 2;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
@@ -142,7 +146,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
 				  "fsf_uss", req->req_id);
 log:
-	debug_event(dbf->hba, 2, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
@@ -156,8 +160,12 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
 	struct fsf_status_read_buffer *sr_buf = req->data;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
@@ -169,7 +177,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
 	       sizeof(struct fsf_bit_error_payload));
 
-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
@@ -186,8 +194,12 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
 	unsigned long flags;
+	static int const level = 1;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->pay, level)))
+		return;
+
 	if (!pl)
 		return;
 
@@ -202,7 +214,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 
 	while (payload->counter < scount && (char *)pl[payload->counter]) {
 		memcpy(payload->data, (char *)pl[payload->counter], length);
-		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
 		payload->counter++;
 	}
 
@@ -217,15 +229,19 @@ void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
 {
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
 	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
 	rec->id = ZFCP_DBF_HBA_BASIC;
 
-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
@@ -264,9 +280,13 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 {
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
 	struct list_head *entry;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
@@ -283,7 +303,7 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 	rec->u.trig.want = want;
 	rec->u.trig.need = need;
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
@@ -300,6 +320,9 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
 	unsigned long flags;
 
+	if (!debug_level_enabled(dbf->rec, level))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
@@ -345,8 +368,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
 {
 	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
@@ -362,10 +389,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
 	rec->u.run.rec_action = ~0;
 	rec->u.run.rec_count = ~0;
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+#define ZFCP_DBF_SAN_LEVEL 1
+
 static inline
 void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
@@ -408,7 +437,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 				      (u16)(sg->length - offset));
 			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
 			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
-			debug_event(dbf->pay, 1, payload,
+			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
 				    zfcp_dbf_plen(pay_len));
 			payload->counter++;
 			offset += pay_len;
@@ -418,7 +447,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 	spin_unlock(&dbf->pay_lock);
 
 out:
-	debug_event(dbf->san, 1, rec, sizeof(*rec));
+	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
@@ -434,6 +463,9 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
 	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
 		     length, fsf->req_id, d_id, length);
@@ -447,6 +479,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
 	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
 	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
 	struct scatterlist *resp_entry = ct_els->resp;
+	struct fc_ct_hdr *resph;
 	struct fc_gpn_ft_resp *acc;
 	int max_entries, x, last = 0;
 
@@ -460,7 +493,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
 	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
 	      && reqh->ct_options == 0
 	      && reqh->_ct_resvd1 == 0
-	      && reqh->ct_cmd == FC_NS_GPN_FT
+	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
 	      /* reqh->ct_mr_size can vary so do not match but read below */
 	      && reqh->_ct_resvd2 == 0
 	      && reqh->ct_reason == 0
@@ -473,7 +506,15 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
 		return len; /* not GPN_FT response so do not cap */
 
 	acc = sg_virt(resp_entry);
-	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+
+	/* cap all but accept CT responses to at least the CT header */
+	resph = (struct fc_ct_hdr *)acc;
+	if ((ct_els->status) ||
+	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
+	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
+		       sizeof(struct fc_gpn_ft_resp))
 		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
 		     * to account for header as 1st pseudo "entry" */;
 
@@ -503,6 +544,9 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
 	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
 		     length, fsf->req_id, ct_els->d_id,
@@ -522,6 +566,9 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 	u16 length;
 	struct scatterlist sg;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)(srb->length -
 			offsetof(struct fsf_status_read_buffer, payload));
 	sg_init_one(&sg, srb->payload.data, length);
@@ -555,8 +602,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
 	rec->scsi_retries = sc->retries;
 	rec->scsi_allowed = sc->allowed;
 	rec->scsi_id = sc->device->id;
-	/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
 	rec->scsi_lun = (u32)sc->device->lun;
+	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
 	rec->host_scribble = (unsigned long)sc->host_scribble;
 
 	memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +611,31 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
 
 	if (fsf) {
 		rec->fsf_req_id = fsf->req_id;
-		fcp_rsp = (struct fcp_resp_with_ext *)
-				&(fsf->qtcb->bottom.io.fcp_rsp);
+		rec->pl_len = FCP_RESP_WITH_EXT;
+		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
+		/* mandatory parts of FCP_RSP IU in this SCSI record */
 		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
 		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
 			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
 			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
 		}
 		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
-			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
-					  (u16)ZFCP_DBF_PAY_MAX_REC);
-			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
-					  "fcp_sns", fsf->req_id);
+			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
 		}
+		/* complete FCP_RSP IU in associated PAYload record
+		 * but only if there are optional parts
+		 */
+		if (fcp_rsp->resp.fr_flags != 0)
+			zfcp_dbf_pl_write(
+				dbf, fcp_rsp,
+				/* at least one full PAY record
+				 * but not beyond hardware response field
+				 */
+				min_t(u16, max_t(u16, rec->pl_len,
+						 ZFCP_DBF_PAY_MAX_REC),
+				      FSF_FCP_RSP_SIZE),
+				"fcp_riu", fsf->req_id);
 	}
 
 	debug_event(dbf->scsi, level, rec, sizeof(*rec));
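[Editorial note, not part of the patch: the zfcp_dbf.c hunks above add a debug_level_enabled() check before each trace record is built, so that disabled trace levels skip the lock and record assembly entirely. The snippet below is a user-space analogue of that pattern, assuming nothing about the s390 debug feature API; debug_area and level_enabled are stand-ins, not kernel names.]

#include <stdbool.h>
#include <stdio.h>

struct debug_area { int level; };

static bool level_enabled(const struct debug_area *a, int level)
{
	return level <= a->level;
}

static void trace_event(struct debug_area *a, int level, const char *tag)
{
	if (!level_enabled(a, level))
		return;		/* early out: skip record assembly */
	/* ...lock, clear the record, copy fields, emit... */
	printf("recorded %s at level %d\n", tag, level);
}

int main(void)
{
	struct debug_area hba = { .level = 2 };

	trace_event(&hba, 1, "fsf_uss");	/* recorded */
	trace_event(&hba, 3, "verbose");	/* skipped */
	return 0;
}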
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d44cfafb6036a7e452e68e2c8f078cff6d1..3508c00458f407101cda09d5ad9c047eff1f1932 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
  */
 
 #ifndef ZFCP_DBF_H
@@ -204,16 +204,17 @@ enum zfcp_dbf_scsi_id {
  * @id: unique number of recovery record type
  * @tag: identifier string specifying the location of initiation
  * @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
  * @scsi_result: scsi result
  * @scsi_retries: current retry number of scsi request
  * @scsi_allowed: allowed retries
- * @fcp_rsp_info: FCP response info
+ * @fcp_rsp_info: FCP response info code
  * @scsi_opcode: scsi opcode
  * @fsf_req_id: request id of fsf request
  * @host_scribble: LLD specific data attached to SCSI request
- * @pl_len: length of paload stored as zfcp_dbf_pay
- * @fsf_rsp: response for fsf request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: response for FCP request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
  */
 struct zfcp_dbf_scsi {
 	u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
 	u64 host_scribble;
 	u16 pl_len;
 	struct fcp_resp_with_ext fcp_rsp;
+	u32 scsi_lun_64_hi;
 } __packed;
 
 /**
@@ -299,7 +301,7 @@ bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
 
 	if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
 		return false; /* not an FCP response */
-	fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+	fcp_rsp = &qtcb->bottom.io.fcp_rsp.iu.resp;
 	rsp_flags = fcp_rsp->fr_flags;
 	fr_status = fcp_rsp->fr_status;
 	return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 {
 	struct fsf_qtcb *qtcb = req->qtcb;
 
-	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+	if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+				    ZFCP_STATUS_FSFREQ_ERROR))) {
+		zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+	} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
  */
 static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+			    struct zfcp_fsf_req *fsf_req)
 {
 	char tmp_tag[ZFCP_DBF_TAG_LEN];
 
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 		memcpy(tmp_tag, "lr_", 3);
 
 	memcpy(&tmp_tag[3], tag, 4);
-	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce55903423f5e998fe3d93f6942a5879376f..37408f5f81ce84e5f4f598069bbc417edff71469 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -572,9 +572,8 @@ static void zfcp_erp_memwait_handler(unsigned long data)
 
 static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 {
-	init_timer(&erp_action->timer);
-	erp_action->timer.function = zfcp_erp_memwait_handler;
-	erp_action->timer.data = (unsigned long) erp_action;
+	setup_timer(&erp_action->timer, zfcp_erp_memwait_handler,
+		    (unsigned long) erp_action);
 	erp_action->timer.expires = jiffies + HZ;
 	add_timer(&erp_action->timer);
 }
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 9afdbc32b23f6386a7dce2b51e4cf67ad562d215..a9e968717dd921eb40aca7293f765f285e4ab70c 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -41,7 +41,6 @@ extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
 extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 7331eea67435c28d2f96a70ce99b864544a69bf2..8210645c21111265ec73a5b4f5142a408e8a2530 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
  *
  * Fibre Channel related functions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -29,7 +29,7 @@ static u32 zfcp_fc_rscn_range_mask[] = {
 };
 
 static bool no_auto_port_rescan;
-module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+module_param(no_auto_port_rescan, bool, 0600);
 MODULE_PARM_DESC(no_auto_port_rescan,
 		 "no automatic port_rescan (default off)");
 
@@ -260,7 +260,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 	page = (struct fc_els_rscn_page *) head;
 
 	/* see FC-FS */
-	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
+	no_entries = be16_to_cpu(head->rscn_plen) /
+		sizeof(struct fc_els_rscn_page);
 
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
@@ -296,7 +297,7 @@ static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
 
 	status_buffer = (struct fsf_status_read_buffer *) req->data;
 	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
-	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
+	zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
 }
 
 static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
@@ -306,7 +307,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
 	struct fc_els_logo *logo =
 		(struct fc_els_logo *) status_buffer->payload.data;
 
-	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
+	zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
 }
 
 /**
@@ -335,7 +336,7 @@ static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
 
 	if (ct_els->status)
 		return;
-	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
+	if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
 		return;
 
 	/* looks like a valid d_id */
@@ -352,8 +353,8 @@ static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
 	ct_hdr->ct_rev = FC_CT_REV;
 	ct_hdr->ct_fs_type = FC_FST_DIR;
 	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
-	ct_hdr->ct_cmd = cmd;
-	ct_hdr->ct_mr_size = mr_size / 4;
+	ct_hdr->ct_cmd = cpu_to_be16(cmd);
+	ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
 }
 
 static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
@@ -376,7 +377,7 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
 
 	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
 			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
-	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+	gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);
 
 	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
 			       adapter->pool.gid_pn_req,
@@ -460,26 +461,26 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
  */
 void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
 {
-	if (plogi->fl_wwpn != port->wwpn) {
+	if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
 		port->d_id = 0;
 		dev_warn(&port->adapter->ccw_device->dev,
 			 "A port opened with WWPN 0x%016Lx returned data that "
 			 "identifies it as WWPN 0x%016Lx\n",
 			 (unsigned long long) port->wwpn,
-			 (unsigned long long) plogi->fl_wwpn);
+			 (unsigned long long) be64_to_cpu(plogi->fl_wwpn));
 		return;
 	}
 
-	port->wwnn = plogi->fl_wwnn;
-	port->maxframe_size = plogi->fl_csp.sp_bb_data;
+	port->wwnn = be64_to_cpu(plogi->fl_wwnn);
+	port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);
 
-	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
+	if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
 		port->supported_classes |= FC_COS_CLASS1;
-	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
+	if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
 		port->supported_classes |= FC_COS_CLASS2;
-	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
+	if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
 		port->supported_classes |= FC_COS_CLASS3;
-	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
+	if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
 		port->supported_classes |= FC_COS_CLASS4;
 }
 
@@ -497,9 +498,9 @@ static void zfcp_fc_adisc_handler(void *data)
 	}
 
 	if (!port->wwnn)
-		port->wwnn = adisc_resp->adisc_wwnn;
+		port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);
 
-	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
+	if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
 	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
 				     "fcadh_2");
@@ -538,8 +539,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 
 	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
 	   without FC-AL-2 capability, so we don't set it */
-	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
-	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+	fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
+	fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
 	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
 	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
 
@@ -666,8 +667,8 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
 	if (ct_els->status)
 		return -EIO;
 
-	if (hdr->ct_cmd != FC_FS_ACC) {
-		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
+	if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
+		if (hdr->ct_reason == FC_FS_RJT_UNABL)
 			return -EAGAIN; /* might be a temporary condition */
 		return -EIO;
 	}
@@ -693,10 +694,11 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
 		if (d_id >= FC_FID_WELL_KNOWN_BASE)
 			continue;
 		/* skip the adapter's port and known remote ports */
-		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
+		if (be64_to_cpu(acc->fp_wwpn) ==
+		    fc_host_port_name(adapter->scsi_host))
 			continue;
 
-		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
+		port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
 		if (!IS_ERR(port))
 			zfcp_erp_port_reopen(port, 0, "fcegpf1");
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541c828720718a322003d43f7d54e2aef3eb..41f22d3dc6d1bc3f4917f57fff4ce421f4243899 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
  * Fibre Channel related definitions and inline functions for the zfcp
  * device driver
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
  */
 
 #ifndef ZFCP_FC_H
@@ -212,6 +212,8 @@ static inline
 void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
 			 u8 tm_flags)
 {
+	u32 datalen;
+
 	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
 
 	if (unlikely(tm_flags)) {
@@ -228,10 +230,13 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
 
 	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
 
-	fcp->fc_dl = scsi_bufflen(scsi);
+	datalen = scsi_bufflen(scsi);
+	fcp->fc_dl = cpu_to_be32(datalen);
 
-	if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
-		fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
+	if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) {
+		datalen += datalen / scsi->device->sector_size * 8;
+		fcp->fc_dl = cpu_to_be32(datalen);
+	}
 }
 
 /**
@@ -266,19 +271,23 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
 	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
 		sense = (char *) &fcp_rsp[1];
 		if (rsp_flags & FCP_RSP_LEN_VAL)
-			sense += fcp_rsp->ext.fr_rsp_len;
-		sense_len = min(fcp_rsp->ext.fr_sns_len,
-				(u32) SCSI_SENSE_BUFFERSIZE);
+			sense += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+		sense_len = min_t(u32, be32_to_cpu(fcp_rsp->ext.fr_sns_len),
+				  SCSI_SENSE_BUFFERSIZE);
 		memcpy(scsi->sense_buffer, sense, sense_len);
 	}
 
 	if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
-		resid = fcp_rsp->ext.fr_resid;
+		resid = be32_to_cpu(fcp_rsp->ext.fr_resid);
 		scsi_set_resid(scsi, resid);
 		if (scsi_bufflen(scsi) - resid < scsi->underflow &&
 		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
 		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
 			set_host_byte(scsi, DID_ERROR);
+	} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+		/* FCP_DL was not sufficient for SCSI data length */
+		if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+			set_host_byte(scsi, DID_ERROR);
 	}
 }
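[Editorial note, not part of the patch: the zfcp_fc_scsi_to_fcp() hunk above computes FCP_DL from the SCSI buffer length, adds 8 protection-information bytes per logical block when DIF type 1 is active, and stores the result big-endian via cpu_to_be32(). The sketch below restates that arithmetic in user-space C; htonl() stands in for cpu_to_be32() and the function name is illustrative.]

#include <arpa/inet.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fcp_dl_with_dif(uint32_t bufflen, uint32_t sector_size,
				int dif_type1)
{
	uint32_t datalen = bufflen;

	if (dif_type1)
		datalen += datalen / sector_size * 8;	/* 8 PI bytes/block */
	return htonl(datalen);				/* wire (big) endian */
}

int main(void)
{
	printf("0x%08" PRIx32 "\n", fcp_dl_with_dif(4096, 512, 1));
	return 0;
}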
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f839fc1c752385f430f55020fb934ed6de..69d1dc3ec79d8be48f783baf695616674b06c4d4 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -197,8 +197,6 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
 
 	switch (sr_buf->status_subtype) {
 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
-		zfcp_fsf_link_down_info_eval(req, ldi);
-		break;
 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
 		zfcp_fsf_link_down_info_eval(req, ldi);
 		break;
@@ -476,8 +474,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	fc_host_port_name(shost) = nsp->fl_wwpn;
-	fc_host_node_name(shost) = nsp->fl_wwnn;
+	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
@@ -503,8 +501,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
-		adapter->peer_wwpn = plogi->fl_wwpn;
-		adapter->peer_wwnn = plogi->fl_wwnn;
+		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
+		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
 		break;
 	case FSF_TOPO_FABRIC:
@@ -928,8 +926,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
         case FSF_GOOD:
-		zfcp_dbf_san_res("fsscth2", req);
 		ct->status = 0;
+		zfcp_dbf_san_res("fsscth2", req);
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
 		zfcp_fsf_class_not_supp(req);
@@ -991,8 +989,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 		qtcb->bottom.support.resp_buf_length =
 			zfcp_qdio_real_bytes(sg_resp);
 
-		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
-					zfcp_qdio_sbale_count(sg_req));
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
 		return 0;
@@ -1109,8 +1106,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_dbf_san_res("fsselh1", req);
 		send_els->status = 0;
+		zfcp_dbf_san_res("fsselh1", req);
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
 		zfcp_fsf_class_not_supp(req);
@@ -1394,6 +1391,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			/* no zfcp_fc_test_link() with failed open port */
+			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_NO_RETRY_POSSIBLE:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2142,7 +2141,8 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
 		goto skip_fsfstatus;
 	}
-	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
+	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
 
 skip_fsfstatus:
@@ -2255,10 +2255,12 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
 		goto failed_scsi_cmnd;
 
-	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
+	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
 
-	if (scsi_prot_sg_count(scsi_cmnd)) {
+	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+	    scsi_prot_sg_count(scsi_cmnd)) {
 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
 				       scsi_prot_sg_count(scsi_cmnd));
 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
@@ -2299,7 +2301,7 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
 
 	zfcp_fsf_fcp_handler_common(req);
 
-	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
 
 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
@@ -2348,7 +2350,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
 
 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
-	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
 
 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -2392,7 +2394,6 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 			      req_id, dev_name(&adapter->ccw_device->dev));
 		}
 
-		fsf_req->qdio_req.sbal_response = sbal_idx;
 		zfcp_fsf_req_complete(fsf_req);
 
 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index ea3c76ac0de14dc8ea9ad147d1f5b9ab80cc1172..88feba5bfda433bf7dba59ef3ed2d09af73f596c 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #ifndef FSF_H
@@ -312,8 +312,14 @@ struct fsf_qtcb_bottom_io {
 	u32 data_block_length;
 	u32 prot_data_length;
 	u8  res2[4];
-	u8  fcp_cmnd[FSF_FCP_CMND_SIZE];
-	u8  fcp_rsp[FSF_FCP_RSP_SIZE];
+	union {
+		u8		byte[FSF_FCP_CMND_SIZE];
+		struct fcp_cmnd iu;
+	}   fcp_cmnd;
+	union {
+		u8			 byte[FSF_FCP_RSP_SIZE];
+		struct fcp_resp_with_ext iu;
+	}   fcp_rsp;
 	u8  res3[64];
 } __attribute__ ((packed));
 
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbf2b54703f72171cc863879f7cb70424fe220ca..9e358fc04b78d2b6f7e363aa4d9d45ac8cd3d09f 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -14,7 +14,7 @@
 #include "zfcp_ext.h"
 #include "zfcp_qdio.h"
 
-static bool enable_multibuffer = 1;
+static bool enable_multibuffer = true;
 module_param_named(datarouter, enable_multibuffer, bool, 0400);
 MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
 
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 497cd379b0d10b065940e096ee1b60491c48ac44..7f647a90c750316201c81bcfe6f8928e4b15e65a 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -54,7 +54,6 @@ struct zfcp_qdio {
  * @sbal_last: last sbal for this request
  * @sbal_limit: last possible sbal for this request
  * @sbale_curr: current sbale at creation of this request
- * @sbal_response: sbal used in interrupt
  * @qdio_outb_usage: usage of outbound queue
  */
 struct zfcp_qdio_req {
@@ -64,7 +63,6 @@ struct zfcp_qdio_req {
 	u8	sbal_last;
 	u8	sbal_limit;
 	u8	sbale_curr;
-	u8	sbal_response;
 	u16	qdio_outb_usage;
 };
 
@@ -224,21 +222,6 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
 	sbale->length = count;
 }
 
-/**
- * zfcp_qdio_sbale_count - count sbale used
- * @sg: pointer to struct scatterlist
- */
-static inline
-unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
-{
-	unsigned int count = 0;
-
-	for (; sg; sg = sg_next(sg))
-		count++;
-
-	return count;
-}
-
 /**
  * zfcp_qdio_real_bytes - count bytes used
  * @sg: pointer to struct scatterlist
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0678cf714c0ee6060eb10e80c3a7c05f6a21f1d2..ec3ddd1d31d5f5be43fb70425dd3b8981a179f63 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -28,7 +28,7 @@ static bool enable_dif;
 module_param_named(dif, enable_dif, bool, 0400);
 MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
 
-static bool allow_lun_scan = 1;
+static bool allow_lun_scan = true;
 module_param(allow_lun_scan, bool, 0600);
 MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
 
@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
-		if (ret)
+		if (ret) {
+			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
 			return ret;
+		}
 
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
 			return SUCCESS;
 		}
 	}
-	if (!fsf_req)
+	if (!fsf_req) {
+		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
 		return FAILED;
+	}
 
 	wait_for_completion(&fsf_req->completion);
 
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
-		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
 		retval = FAILED;
 	} else {
-		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
 		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
 	}
 
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 4b3b08025ef60d3254f5b571866fd1e76dcd5666..6be77b3aa8a5f53210916c428190bf95d02b18be 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -168,7 +168,6 @@ MODULE_LICENSE("GPL");
 
 STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
-STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
@@ -315,7 +314,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	/* Fill in the missing routines from the host template */
 	tpnt->queuecommand = NCR_700_queuecommand;
 	tpnt->eh_abort_handler = NCR_700_abort;
-	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
@@ -1938,14 +1936,14 @@ NCR_700_abort(struct scsi_cmnd * SCp)
 }
 
 STATIC int
-NCR_700_bus_reset(struct scsi_cmnd * SCp)
+NCR_700_host_reset(struct scsi_cmnd * SCp)
 {
 	DECLARE_COMPLETION_ONSTACK(complete);
 	struct NCR_700_Host_Parameters *hostdata = 
 		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
 
 	scmd_printk(KERN_INFO, SCp,
-		"New error handler wants BUS reset, cmd %p\n\t", SCp);
+		"New error handler wants HOST reset, cmd %p\n\t", SCp);
 	scsi_print_command(SCp);
 
 	/* In theory, eh_complete should always be null because the
@@ -1960,6 +1958,7 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
 
 	hostdata->eh_complete = &complete;
 	NCR_700_internal_bus_reset(SCp->device->host);
+	NCR_700_chip_reset(SCp->device->host);
 
 	spin_unlock_irq(SCp->device->host->host_lock);
 	wait_for_completion(&complete);
@@ -1974,22 +1973,6 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
 	return SUCCESS;
 }
 
-STATIC int
-NCR_700_host_reset(struct scsi_cmnd * SCp)
-{
-	scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
-	scsi_print_command(SCp);
-
-	spin_lock_irq(SCp->device->host->host_lock);
-
-	NCR_700_internal_bus_reset(SCp->device->host);
-	NCR_700_chip_reset(SCp->device->host);
-
-	spin_unlock_irq(SCp->device->host->host_lock);
-
-	return SUCCESS;
-}
-
 STATIC void
 NCR_700_set_period(struct scsi_target *STp, int period)
 {
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index acc33440bca0bfbc3e1c168ffb73ed952efe97d1..8a0812221d72fdb9488b33f5ff5bfe04bf137fcf 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -2296,13 +2296,13 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
 
 /**
- * NCR5380_bus_reset - reset the SCSI bus
+ * NCR5380_host_reset - reset the SCSI host
  * @cmd: SCSI command undergoing EH
  *
  * Returns SUCCESS
  */
 
-static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 9176bfbd574586ab04a62786253271b1a1636006..61aadc7acb4998a58f27cb1872d883d7e771378d 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -147,22 +147,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int a2091_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fall back
-	   to the same action, calling wd33c93_host_reset() */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template a2091_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "Commodore A2091/A590 SCSI",
@@ -171,7 +155,6 @@ static struct scsi_host_template a2091_scsi_template = {
 	.proc_name		= "A2901",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= a2091_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index e6375b4de79e58e05ba935c51de8aa088435b792..2427a85412471f39d10b159c25154c3bbf37ae85 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -162,22 +162,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int a3000_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this entire function, which should
-	   cause mid-layer to call wd33c93_host_reset anyway? */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template amiga_a3000_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "Amiga 3000 built-in SCSI",
@@ -186,7 +170,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
 	.proc_name		= "A3000",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= a3000_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a1a2c71e162651f93d499c56e024e7073cc7fc79..a64285ab0728f14c870db3d72b19190d516d44f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -594,6 +594,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_READ_NAME);
@@ -611,10 +612,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
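
The hunks in this file move the scsicmd->SCp.phase = AAC_OWNER_FIRMWARE
assignment to before the FIB is submitted, instead of setting it only after
aac_fib_send()/aac_adapter_*() report -EINPROGRESS. Once the FIB is handed to
the adapter, the completion path may run before the submit routine returns, so
the ownership marker has to be in place beforehand. A small sketch of that
ordering, with hypothetical demo_* names:

	enum demo_owner { DEMO_OWNER_MIDLEVEL, DEMO_OWNER_FIRMWARE };

	struct demo_cmd {
		enum demo_owner owner;
	};

	static int demo_submit(struct demo_cmd *cmd,
			       int (*fire)(struct demo_cmd *))
	{
		cmd->owner = DEMO_OWNER_FIRMWARE;	/* mark before the hand-off */
		return fire(cmd);			/* may complete immediately  */
	}
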
@@ -725,6 +724,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 			  fibptr,
@@ -736,9 +736,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS)
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
-	else if (status < 0) {
+	if (status < 0 && status != -EINPROGRESS) {
 		/* Inherit results from VM_NameServe, if any */
 		dresp->status = cpu_to_le32(ST_OK);
 		_aac_probe_container2(context, fibptr);
@@ -766,6 +764,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 		dinfo->type = cpu_to_le32(FT_FILESYS);
 		scsicmd->SCp.ptr = (char *)callback;
+		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 		status = aac_fib_send(ContainerCommand,
 			  fibptr,
@@ -777,10 +776,9 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 		/*
 		 *	Check that the command queued to the controller
 		 */
-		if (status == -EINPROGRESS) {
-			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+		if (status == -EINPROGRESS)
 			return 0;
-		}
+
 		if (status < 0) {
 			scsicmd->SCp.ptr = NULL;
 			aac_fib_complete(fibptr);
@@ -1126,6 +1124,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		  cmd_fibcontext,
@@ -1138,10 +1137,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
@@ -2335,16 +2332,14 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 	 *	Alocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
 	/*
@@ -2429,16 +2424,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
 	/*
@@ -2588,6 +2581,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
 	synchronizecmd->count =
 	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
@@ -2603,10 +2597,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING
 		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
@@ -2666,6 +2658,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
 	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
 		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
@@ -2681,10 +2674,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	aac_fib_complete(cmd_fibcontext);
 	aac_fib_free(cmd_fibcontext);
@@ -3692,16 +3683,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
 	aac_fib_complete(cmd_fibcontext);
@@ -3739,15 +3728,14 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
 	if (!cmd_fibcontext)
 		return -1;
 
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
 		status);
@@ -3763,6 +3751,8 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
@@ -3771,32 +3761,29 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 	psg->sg[0].count = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
 
-		psg->count = cpu_to_le32(nseg);
+	psg->count = cpu_to_le32(nseg);
 
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
-			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
-			byte_count += sg_dma_len(sg);
-		}
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+		psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+		byte_count += sg_dma_len(sg);
 	}
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
 	return byte_count;
 }
 
@@ -3807,6 +3794,8 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 	unsigned long byte_count = 0;
 	u64 addr;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
@@ -3816,34 +3805,31 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 	psg->sg[0].count = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			addr = sg_dma_address(sg);
-			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
-			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
-			psg->sg[i].count = cpu_to_le32(count);
-			byte_count += count;
-		}
-		psg->count = cpu_to_le32(nseg);
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+		psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+		psg->sg[i].count = cpu_to_le32(count);
+		byte_count += count;
+	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
 	}
+
 	return byte_count;
 }
 
@@ -3851,6 +3837,8 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
 {
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i;
 
 	// Get rid of old data
 	psg->count = 0;
@@ -3862,37 +3850,34 @@ static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
 	psg->sg[0].flags = 0;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			u64 addr = sg_dma_address(sg);
-			psg->sg[i].next = 0;
-			psg->sg[i].prev = 0;
-			psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
-			psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
-			psg->sg[i].count = cpu_to_le32(count);
-			psg->sg[i].flags = 0;
-			byte_count += count;
-		}
-		psg->count = cpu_to_le32(nseg);
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
-				(byte_count - scsi_bufflen(scsicmd));
-			psg->sg[i-1].count = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
-		/* Check for command underflow */
-		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
-		}
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+		psg->sg[i].next = 0;
+		psg->sg[i].prev = 0;
+		psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
+		psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+		psg->sg[i].count = cpu_to_le32(count);
+		psg->sg[i].flags = 0;
+		byte_count += count;
 	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
 	return byte_count;
 }
 
@@ -3901,75 +3886,77 @@ static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 {
 	unsigned long byte_count = 0;
 	int nseg;
+	struct scatterlist *sg;
+	int i, conformable = 0;
+	u32 min_size = PAGE_SIZE, cur_size;
 
 	nseg = scsi_dma_map(scsicmd);
-	if (nseg < 0)
+	if (nseg <= 0)
 		return nseg;
-	if (nseg) {
-		struct scatterlist *sg;
-		int i, conformable = 0;
-		u32 min_size = PAGE_SIZE, cur_size;
-
-		scsi_for_each_sg(scsicmd, sg, nseg, i) {
-			int count = sg_dma_len(sg);
-			u64 addr = sg_dma_address(sg);
-
-			BUG_ON(i >= sg_max);
-			rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
-			rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
-			cur_size = cpu_to_le32(count);
-			rio2->sge[i].length = cur_size;
-			rio2->sge[i].flags = 0;
-			if (i == 0) {
-				conformable = 1;
-				rio2->sgeFirstSize = cur_size;
-			} else if (i == 1) {
-				rio2->sgeNominalSize = cur_size;
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+
+		BUG_ON(i >= sg_max);
+		rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+		rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+		cur_size = cpu_to_le32(count);
+		rio2->sge[i].length = cur_size;
+		rio2->sge[i].flags = 0;
+		if (i == 0) {
+			conformable = 1;
+			rio2->sgeFirstSize = cur_size;
+		} else if (i == 1) {
+			rio2->sgeNominalSize = cur_size;
+			min_size = cur_size;
+		} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+			conformable = 0;
+			if (cur_size < min_size)
 				min_size = cur_size;
-			} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
-				conformable = 0;
-				if (cur_size < min_size)
-					min_size = cur_size;
-			}
-			byte_count += count;
 		}
+		byte_count += count;
+	}
 
-		/* hba wants the size to be exact */
-		if (byte_count > scsi_bufflen(scsicmd)) {
-			u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
-				(byte_count - scsi_bufflen(scsicmd));
-			rio2->sge[i-1].length = cpu_to_le32(temp);
-			byte_count = scsi_bufflen(scsicmd);
-		}
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+			(byte_count - scsi_bufflen(scsicmd));
+		rio2->sge[i-1].length = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
 
-		rio2->sgeCnt = cpu_to_le32(nseg);
-		rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
-		/* not conformable: evaluate required sg elements */
-		if (!conformable) {
-			int j, nseg_new = nseg, err_found;
-			for (i = min_size / PAGE_SIZE; i >= 1; --i) {
-				err_found = 0;
-				nseg_new = 2;
-				for (j = 1; j < nseg - 1; ++j) {
-					if (rio2->sge[j].length % (i*PAGE_SIZE)) {
-						err_found = 1;
-						break;
-					}
-					nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
-				}
-				if (!err_found)
+	rio2->sgeCnt = cpu_to_le32(nseg);
+	rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+	/* not conformable: evaluate required sg elements */
+	if (!conformable) {
+		int j, nseg_new = nseg, err_found;
+		for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+			err_found = 0;
+			nseg_new = 2;
+			for (j = 1; j < nseg - 1; ++j) {
+				if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+					err_found = 1;
 					break;
+				}
+				nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
 			}
-			if (i > 0 && nseg_new <= sg_max)
-				aac_convert_sgraw2(rio2, i, nseg, nseg_new);
-		} else
-			rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+			if (!err_found)
+				break;
+		}
+		if (i > 0 && nseg_new <= sg_max) {
+			int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
 
-		/* Check for command underflow */
-		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
-			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-					byte_count, scsicmd->underflow);
+			if (ret < 0)
+				return ret;
 		}
+	} else
+		rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
 	}
 
 	return byte_count;
@@ -3986,7 +3973,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
 
 	sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
 	if (sge == NULL)
-		return -1;
+		return -ENOMEM;
 
 	for (i = 1, pos = 1; i < nseg-1; ++i) {
 		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
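
Two patterns recur in the aac_build_sg*() rework above: scsi_dma_map() results
of zero or less now take an early return, so the rest of each helper runs
unindented; and the "hba wants the size to be exact" step trims the last S/G
element so the summed DMA length never exceeds scsi_bufflen(). A standalone
sketch of the trimming step (demo_sge and demo_trim_sg are hypothetical, and
the little-endian conversions of the real code are omitted):

	#include <stdint.h>

	struct demo_sge {
		uint32_t addr;
		uint32_t count;
	};

	static unsigned long demo_trim_sg(struct demo_sge *sg, int nseg,
					  unsigned long byte_count,
					  unsigned long bufflen)
	{
		if (byte_count > bufflen) {
			/* the last element absorbs the overshoot */
			sg[nseg - 1].count -= byte_count - bufflen;
			byte_count = bufflen;
		}
		return byte_count;
	}

For example, 4608 mapped bytes against a 4096-byte request shave 512 bytes off
the final element, and the length reported to the firmware matches the request
exactly.
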
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index ee2667e20e4239f9129ebe21e7983a2428065b88..92fabf2b0c24c4c24f79ae8626d80ef09853816d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1723,6 +1723,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_FASTRESP		(0x00000008)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA		(0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF	(0x00000020)
+#define FIB_CONTEXT_FLAG_SCSI_CMD	(0x00000040)
 
 /*
  *	Define the command values
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 9ee025b1d0e041d4afbdb14667e9b5ea9112e891..97d269f1688863a90f0263c5668441650fd2cadc 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -520,9 +520,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 			dev->raw_io_64 = 1;
 		dev->sync_mode = aac_sync_mode;
 		if (dev->a_ops.adapter_comm &&
-			(status[1] & AAC_OPT_NEW_COMM)) {
-				dev->comm_interface = AAC_COMM_MESSAGE;
-				dev->raw_io_interface = 1;
+		    (status[1] & AAC_OPT_NEW_COMM)) {
+			dev->comm_interface = AAC_COMM_MESSAGE;
+			dev->raw_io_interface = 1;
 			if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
 				/* driver supports TYPE1 (Tupelo) */
 				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1c617ccfaf12e4d140477c24faf1ca0038317797..dfe8e70f8d9968d8b3f2e251bc1639f8d26f562f 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -770,7 +770,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 		/* bit1 of request_id must be 0 */
 		hbacmd->request_id =
 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
-	} else
+		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
+	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
 		return -EINVAL;
 
 
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0f277df73af0f6b66d064536627e93b306664502..87cc4a93e637e6db517c12b0c01bb212eb2e8445 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -814,111 +814,225 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 	return ret;
 }
 
+static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
+				   struct fib *fib, u64 tmf_lun)
+{
+	struct aac_hba_tm_req *tmf;
+	u64 address;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+	memset(tmf, 0, sizeof(*tmf));
+	tmf->tmf = HBA_TMF_LUN_RESET;
+	tmf->it_nexus = info->rmw_nexus;
+	int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
+
+	address = (u64)fib->hw_error_pa;
+	tmf->error_ptr_hi = cpu_to_le32
+		((u32)(address >> 32));
+	tmf->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*tmf);
+
+	return HBA_IU_TYPE_SCSI_TM_REQ;
+}
+
+static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
+				    struct fib *fib)
+{
+	struct aac_hba_reset_req *rst;
+	u64 address;
+
+	/* already tried, start a hard reset now */
+	rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
+	memset(rst, 0, sizeof(*rst));
+	rst->it_nexus = info->rmw_nexus;
+
+	address = (u64)fib->hw_error_pa;
+	rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+	rst->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*rst);
+
+	return HBA_IU_TYPE_SATA_REQ;
+}
+
+void aac_tmf_callback(void *context, struct fib *fibptr)
+{
+	struct aac_hba_resp *err =
+		&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+	struct aac_hba_map_info *info = context;
+	int res;
+
+	switch (err->service_response) {
+	case HBA_RESP_SVCRES_TMF_REJECTED:
+		res = -1;
+		break;
+	case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+		res = 0;
+		break;
+	case HBA_RESP_SVCRES_TMF_COMPLETE:
+	case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+		res = 0;
+		break;
+	default:
+		res = -2;
+		break;
+	}
+	aac_fib_complete(fibptr);
+
+	info->reset_state = res;
+}
+
 /*
- *	aac_eh_reset	- Reset command handling
+ *	aac_eh_dev_reset	- Device reset command handling
  *	@scsi_cmd:	SCSI command block causing the reset
  *
  */
-static int aac_eh_reset(struct scsi_cmnd* cmd)
+static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
 {
 	struct scsi_device * dev = cmd->device;
 	struct Scsi_Host * host = dev->host;
 	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
 	int count;
 	u32 bus, cid;
+	struct fib *fib;
 	int ret = FAILED;
-	int status = 0;
-	__le32 supported_options2 = 0;
-	bool is_mu_reset;
-	bool is_ignore_reset;
-	bool is_doorbell_reset;
-
+	int status;
+	u8 command;
 
 	bus = aac_logical_to_phys(scmd_channel(cmd));
 	cid = scmd_id(cmd);
-	if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
-		aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
-		struct fib *fib;
-		int status;
-		u64 address;
-		u8 command;
+	info = &aac->hba_map[bus][cid];
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+	    info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return FAILED;
 
-		pr_err("%s: Host adapter reset request. SCSI hang ?\n",
-			AAC_DRIVERNAME);
+	if (info->reset_state > 0)
+		return FAILED;
 
-		fib = aac_fib_alloc(aac);
-		if (!fib)
-			return ret;
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
 
+	info->reset_state = 1;
 
-		if (aac->hba_map[bus][cid].reset_state == 0) {
-			struct aac_hba_tm_req *tmf;
-
-			/* start a HBA_TMF_LUN_RESET TMF request */
-			tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
-			memset(tmf, 0, sizeof(*tmf));
-			tmf->tmf = HBA_TMF_LUN_RESET;
-			tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
-			tmf->lun[1] = cmd->device->lun;
-
-			address = (u64)fib->hw_error_pa;
-			tmf->error_ptr_hi = cpu_to_le32
-					((u32)(address >> 32));
-			tmf->error_ptr_lo = cpu_to_le32
-					((u32)(address & 0xffffffff));
-			tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
-			fib->hbacmd_size = sizeof(*tmf);
-
-			command = HBA_IU_TYPE_SCSI_TM_REQ;
-			aac->hba_map[bus][cid].reset_state++;
-		} else if (aac->hba_map[bus][cid].reset_state >= 1) {
-			struct aac_hba_reset_req *rst;
-
-			/* already tried, start a hard reset now */
-			rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
-			memset(rst, 0, sizeof(*rst));
-			/* reset_type is already zero... */
-			rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
-
-			address = (u64)fib->hw_error_pa;
-			rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
-			rst->error_ptr_lo = cpu_to_le32
-				((u32)(address & 0xffffffff));
-			rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
-			fib->hbacmd_size = sizeof(*rst);
-
-			command = HBA_IU_TYPE_SATA_REQ;
-			aac->hba_map[bus][cid].reset_state = 0;
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state == 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
 		}
-		cmd->SCp.sent_command = 0;
+		msleep(1000);
+	}
 
-		status = aac_hba_send(command, fib,
-				  (fib_callback) aac_hba_callback,
-				  (void *) cmd);
+	return ret;
+}
 
-		/* Wait up to 15 seconds for completion */
-		for (count = 0; count < 15; ++count) {
-			if (cmd->SCp.sent_command) {
-				ret = SUCCESS;
-				break;
-			}
-			msleep(1000);
+/*
+ *	aac_eh_target_reset	- Target reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
+	int count;
+	u32 bus, cid;
+	int ret = FAILED;
+	struct fib *fib;
+	int status;
+	u8 command;
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	cid = scmd_id(cmd);
+	info = &aac->hba_map[bus][cid];
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+	    info->devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return FAILED;
+
+	if (info->reset_state > 0)
+		return FAILED;
+
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+
+	/* already tried, start a hard reset now */
+	command = aac_eh_tmf_hard_reset_fib(info, fib);
+
+	info->reset_state = 2;
+
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state <= 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
 		}
+		msleep(1000);
+	}
 
-		if (ret == SUCCESS)
-			goto out;
+	return ret;
+}
 
-	} else {
+/*
+ *	aac_eh_bus_reset	- Bus reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int count;
+	u32 cmd_bus;
+	int status = 0;
 
-		/* Mark the assoc. FIB to not complete, eh handler does this */
-		for (count = 0;
-			count < (host->can_queue + AAC_NUM_MGT_FIB);
-			++count) {
-			struct fib *fib = &aac->fibs[count];
 
-			if (fib->hw_fib_va->header.XferState &&
-				(fib->flags & FIB_CONTEXT_FLAG) &&
-				(fib->callback_data == cmd)) {
+	cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
+	/* Mark the assoc. FIB to not complete, eh handler does this */
+	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+		struct fib *fib = &aac->fibs[count];
+
+		if (fib->hw_fib_va->header.XferState &&
+		    (fib->flags & FIB_CONTEXT_FLAG) &&
+		    (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
+			struct aac_hba_map_info *info;
+			u32 bus, cid;
+
+			cmd = (struct scsi_cmnd *)fib->callback_data;
+			bus = aac_logical_to_phys(scmd_channel(cmd));
+			if (bus != cmd_bus)
+				continue;
+			cid = scmd_id(cmd);
+			info = &aac->hba_map[bus][cid];
+			if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+			    info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
 				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
 				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
 			}
@@ -935,8 +1049,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 		dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
 
 	count = get_num_of_incomplete_fibs(aac);
-	if (count == 0)
-		return SUCCESS;
+	return (count == 0) ? SUCCESS : FAILED;
+}
+
+/*
+ *	aac_eh_host_reset	- Host reset command handling
+ *	@scsi_cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int ret = FAILED;
+	__le32 supported_options2 = 0;
+	bool is_mu_reset;
+	bool is_ignore_reset;
+	bool is_doorbell_reset;
 
 	/*
 	 * Check if reset is supported by the firmware
@@ -954,11 +1084,24 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	 && aac_check_reset
 	 && (aac_check_reset != -1 || !is_ignore_reset)) {
 		/* Bypass wait for command quiesce */
-		aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
+			ret = SUCCESS;
+	}
+	/*
+	 * Reset EH state
+	 */
+	if (ret == SUCCESS) {
+		int bus, cid;
+		struct aac_hba_map_info *info;
+
+		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
+				info = &aac->hba_map[bus][cid];
+				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
+					info->reset_state = 0;
+			}
+		}
 	}
-	ret = SUCCESS;
-
-out:
 	return ret;
 }
 
@@ -1382,7 +1525,10 @@ static struct scsi_host_template aac_driver_template = {
 	.change_queue_depth		= aac_change_queue_depth,
 	.sdev_attrs			= aac_dev_attrs,
 	.eh_abort_handler		= aac_eh_abort,
-	.eh_host_reset_handler		= aac_eh_reset,
+	.eh_device_reset_handler	= aac_eh_dev_reset,
+	.eh_target_reset_handler	= aac_eh_target_reset,
+	.eh_bus_reset_handler		= aac_eh_bus_reset,
+	.eh_host_reset_handler		= aac_eh_host_reset,
 	.can_queue			= AAC_NUM_IO_FIB,
 	.this_id			= MAXIMUM_NUM_CONTAINERS,
 	.sg_tablesize			= 16,
@@ -1457,7 +1603,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/*
 	 * Only series 7 needs freset.
 	 */
-	 if (pdev->device == PMC_DEVICE_S7)
+	if (pdev->device == PMC_DEVICE_S7)
 		pdev->needs_freset = 1;
 
 	list_for_each_entry(aac, &aac_devices, entry) {
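
The linit.c rework above splits the single aac_eh_reset() handler into device,
target, bus and host reset handlers and completes the first two through
aac_tmf_callback(), which stores the firmware's verdict in
hba_map[bus][cid].reset_state while the error-handling thread polls it. A
minimal sketch of that issue-then-poll pattern, with hypothetical demo_* names:

	#include <linux/delay.h>

	struct demo_target {
		int reset_state;	/* >0 in flight, 0 success, <0 failure */
	};

	/* runs from the adapter's completion (callback) context */
	static void demo_tmf_done(struct demo_target *t, int firmware_result)
	{
		t->reset_state = firmware_result;
	}

	/* runs from the SCSI error-handler thread */
	static int demo_wait_for_tmf(struct demo_target *t)
	{
		int count;

		for (count = 0; count < 15; ++count) {	/* up to ~15 seconds */
			if (t->reset_state <= 0)
				return t->reset_state == 0 ? 0 : -1;
			msleep(1000);
		}
		return -1;	/* timed out */
	}
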
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index ce5dc73d85bb1a467388cad7d86b3a5075e0f041..bc0058df31c610ca83689ee71c94a7bafdf350ea 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1140,6 +1140,9 @@ static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
 /*
  * Reset the bus
  *
+ * The AIC-6260 has a hard reset (MRST signal), but apparently
+ * it cannot be triggered via software. So live with
+ * a soft reset; no one seems to have cared.
  */
 static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
 {
@@ -1222,15 +1225,6 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt)
 	return SUCCESS;
 }
 
-/*
- * Reset the host (bus and controller)
- *
- */
-static int aha152x_host_reset(Scsi_Cmnd *SCpnt)
-{
-	return aha152x_host_reset_host(SCpnt->device->host);
-}
-
 /*
  * Return the "logical geometry"
  *
@@ -2917,7 +2911,6 @@ static struct scsi_host_template aha152x_driver_template = {
 	.eh_abort_handler		= aha152x_abort,
 	.eh_device_reset_handler	= aha152x_device_reset,
 	.eh_bus_reset_handler		= aha152x_bus_reset,
-	.eh_host_reset_handler		= aha152x_host_reset,
 	.bios_param			= aha152x_biosparam,
 	.can_queue			= 1,
 	.this_id			= 7,
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index a23cc9ac5acdad15b3d3a95a2227b969f10ba791..124217927c4af267554733df8cad79afe9cc7bee 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -986,7 +986,7 @@ static struct isa_driver aha1542_isa_driver = {
 static int isa_registered;
 
 #ifdef CONFIG_PNP
-static struct pnp_device_id aha1542_pnp_ids[] = {
+static const struct pnp_device_id aha1542_pnp_ids[] = {
 	{ .id = "ADP1542" },
 	{ .id = "" }
 };
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 07b60a780c06b8b2be234e1a5f4ea97403f1c54f..b03ba0df7a83c624c714a0b0d9a2f85b7b785784 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -59,7 +59,8 @@ $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
 			      $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
 			      $(srctree)/$(src)/aic7xxx.seq
 
-$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
+$(aic7xxx-gen-y): $(objtree)/$(obj)/aic7xxx_seq.h
+	@true
 else
 $(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
 endif
@@ -76,7 +77,8 @@ $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
 			      $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
 			      $(srctree)/$(src)/aic79xx.seq
 
-$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
+$(aic79xx-gen-y): $(objtree)/$(obj)/aic79xx_seq.h
+	@true
 else
 $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
 endif
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index cdcead071ef6449c337b99d1696910c319cd9b70..ddcd5a7701ac27431218f54d6cd6924181ff8d00 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -12,13 +12,6 @@ typedef struct ahd_reg_parse_entry {
 	uint8_t	 mask;
 } ahd_reg_parse_entry_t;
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mode_ptr_print;
-#else
-#define ahd_mode_ptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MODE_PTR", 0x00, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_intstat_print;
 #else
@@ -26,27 +19,6 @@ ahd_reg_print_t ahd_intstat_print;
     ahd_print_register(NULL, 0, "INTSTAT", 0x01, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqintcode_print;
-#else
-#define ahd_seqintcode_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQINTCODE", 0x02, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_error_print;
-#else
-#define ahd_error_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hescb_qoff_print;
-#else
-#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "HESCB_QOFF", 0x08, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_hs_mailbox_print;
 #else
@@ -61,27 +33,6 @@ ahd_reg_print_t ahd_seqintstat_print;
     ahd_print_register(NULL, 0, "SEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrseqintstat_print;
-#else
-#define ahd_clrseqintstat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_swtimer_print;
-#else
-#define ahd_swtimer_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SWTIMER", 0x0e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sescb_qoff_print;
-#else
-#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SESCB_QOFF", 0x12, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_intctl_print;
 #else
@@ -110,111 +61,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
     ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqin_print;
-#else
-#define ahd_lqin_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lunptr_print;
-#else
-#define ahd_lunptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdlenptr_print;
-#else
-#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDLENPTR", 0x25, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_attrptr_print;
-#else
-#define ahd_attrptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ATTRPTR", 0x26, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flagptr_print;
-#else
-#define ahd_flagptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLAGPTR", 0x27, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdptr_print;
-#else
-#define ahd_cmdptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDPTR", 0x28, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qnextptr_print;
-#else
-#define ahd_qnextptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_abrtbyteptr_print;
-#else
-#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ABRTBYTEPTR", 0x2b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_abrtbitptr_print;
-#else
-#define ahd_abrtbitptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lunlen_print;
-#else
-#define ahd_lunlen_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LUNLEN", 0x30, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cdblimit_print;
-#else
-#define ahd_cdblimit_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CDBLIMIT", 0x31, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmd_print;
-#else
-#define ahd_maxcmd_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMD", 0x32, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmdcnt_print;
-#else
-#define ahd_maxcmdcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl1_print;
-#else
-#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl2_print;
-#else
-#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsiseq0_print;
 #else
@@ -229,13 +75,6 @@ ahd_reg_print_t ahd_scsiseq1_print;
     ahd_print_register(NULL, 0, "SCSISEQ1", 0x3b, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl0_print;
-#else
-#define ahd_sxfrctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dffstat_print;
 #else
@@ -243,13 +82,6 @@ ahd_reg_print_t ahd_dffstat_print;
     ahd_print_register(NULL, 0, "DFFSTAT", 0x3f, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_multargid_print;
-#else
-#define ahd_multargid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsisigi_print;
 #else
@@ -264,977 +96,270 @@ ahd_reg_print_t ahd_scsiphase_print;
     ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsidat_print;
-#else
-#define ahd_scsidat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSIDAT", 0x44, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsibus_print;
 #else
 #define ahd_scsibus_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_targidin_print;
-#else
-#define ahd_targidin_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TARGIDIN", 0x48, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_selid_print;
-#else
-#define ahd_selid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SELID", 0x49, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sblkctl_print;
-#else
-#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat0_print;
-#else
-#define ahd_sstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode0_print;
-#else
-#define ahd_simode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat1_print;
-#else
-#define ahd_sstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SSTAT1", 0x4c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat2_print;
-#else
-#define ahd_sstat2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint2_print;
-#else
-#define ahd_clrsint2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT2", 0x4d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_perrdiag_print;
-#else
-#define ahd_perrdiag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqistate_print;
-#else
-#define ahd_lqistate_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQISTATE", 0x4e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_soffcnt_print;
-#else
-#define ahd_soffcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqostate_print;
-#else
-#define ahd_lqostate_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSTATE", 0x4f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqistat0_print;
-#else
-#define ahd_lqistat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
-#else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
-#else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode1_print;
-#else
-#define ahd_lqimode1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIMODE1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqistat1_print;
-#else
-#define ahd_lqistat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint1_print;
-#else
-#define ahd_clrlqiint1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQIINT1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqistat2_print;
-#else
-#define ahd_lqistat2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQISTAT2", 0x52, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat3_print;
-#else
-#define ahd_sstat3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode3_print;
-#else
-#define ahd_simode3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE3", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint3_print;
-#else
-#define ahd_clrsint3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT3", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqostat0_print;
-#else
-#define ahd_lqostat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqoint0_print;
-#else
-#define ahd_clrlqoint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQOINT0", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqomode0_print;
-#else
-#define ahd_lqomode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOMODE0", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqomode1_print;
-#else
-#define ahd_lqomode1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOMODE1", 0x55, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqostat1_print;
-#else
-#define ahd_lqostat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqoint1_print;
-#else
-#define ahd_clrlqoint1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQOINT1", 0x55, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqostat2_print;
-#else
-#define ahd_lqostat2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_os_space_cnt_print;
-#else
-#define ahd_os_space_cnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OS_SPACE_CNT", 0x56, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode1_print;
-#else
-#define ahd_simode1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_gsfifo_print;
-#else
-#define ahd_gsfifo_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "GSFIFO", 0x58, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dffsxfrctl_print;
-#else
-#define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqoscsctl_print;
-#else
-#define ahd_lqoscsctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_nextscb_print;
-#else
-#define ahd_nextscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEXTSCB", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrseqintsrc_print;
-#else
-#define ahd_clrseqintsrc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSEQINTSRC", 0x5b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqintsrc_print;
-#else
-#define ahd_seqintsrc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
-#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqimode_print;
-#else
-#define ahd_seqimode_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mdffstat_print;
-#else
-#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastscb_print;
-#else
-#define ahd_lastscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negoaddr_print;
-#else
-#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negperiod_print;
-#else
-#define ahd_negperiod_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negoffset_print;
-#else
-#define ahd_negoffset_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGOFFSET", 0x62, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negppropts_print;
-#else
-#define ahd_negppropts_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGPPROPTS", 0x63, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_negconopts_print;
-#else
-#define ahd_negconopts_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEGCONOPTS", 0x64, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_annexcol_print;
-#else
-#define ahd_annexcol_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ANNEXCOL", 0x65, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_annexdat_print;
-#else
-#define ahd_annexdat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ANNEXDAT", 0x66, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scschkn_print;
-#else
-#define ahd_scschkn_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSCHKN", 0x66, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_iownid_print;
-#else
-#define ahd_iownid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shcnt_print;
-#else
-#define ahd_shcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SHCNT", 0x68, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_townid_print;
-#else
-#define ahd_townid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seloid_print;
-#else
-#define ahd_seloid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbhaddr_print;
-#else
-#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBHADDR", 0x7c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
-#else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbhcnt_print;
-#else
-#define ahd_scbhcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
-#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pcixctl_print;
-#else
-#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat0_print;
-#else
-#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat1_print;
-#else
-#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat0_print;
-#else
-#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat1_print;
-#else
-#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df0pcistat_print;
-#else
-#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg0_print;
-#else
-#define ahd_reg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg_isr_print;
-#else
-#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sg_state_print;
-#else
-#define ahd_sg_state_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_targpcistat_print;
-#else
-#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbautoptr_print;
-#else
-#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBAUTOPTR", 0xab, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbaddr_print;
-#else
-#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbctl_print;
-#else
-#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBCTL", 0xad, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccsgctl_print;
-#else
-#define ahd_ccsgctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbram_print;
-#else
-#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brddat_print;
-#else
-#define ahd_brddat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BRDDAT", 0xb8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seeadr_print;
-#else
-#define ahd_seeadr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEEADR", 0xba, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seedat_print;
-#else
-#define ahd_seedat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEEDAT", 0xbc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seectl_print;
-#else
-#define ahd_seectl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEECTL", 0xbe, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seestat_print;
-#else
-#define ahd_seestat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspdatactl_print;
-#else
-#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspselect_print;
-#else
-#define ahd_dspselect_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPSELECT", 0xc4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_wrtbiasctl_print;
-#else
-#define ahd_wrtbiasctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqctl0_print;
-#else
-#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqintctl_print;
-#else
-#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_prgmcnt_print;
-#else
-#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PRGMCNT", 0xde, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_none_print;
-#else
-#define ahd_none_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NONE", 0xea, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_intvec1_addr_print;
-#else
-#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INTVEC1_ADDR", 0xf4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_curaddr_print;
-#else
-#define ahd_curaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_intvec2_addr_print;
-#else
-#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INTVEC2_ADDR", 0xf6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_longjmp_addr_print;
-#else
-#define ahd_longjmp_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LONGJMP_ADDR", 0xf8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_accum_save_print;
-#else
-#define ahd_accum_save_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ACCUM_SAVE", 0xfa, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
-#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sram_base_print;
-#else
-#define ahd_sram_base_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_tid_head_print;
-#else
-#define ahd_waiting_tid_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_TID_HEAD", 0x120, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_tid_tail_print;
-#else
-#define ahd_waiting_tid_tail_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_TID_TAIL", 0x122, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_next_queued_scb_addr_print;
-#else
-#define ahd_next_queued_scb_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR", 0x124, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_scb_head_print;
-#else
-#define ahd_complete_scb_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD", 0x128, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_scb_dmainprog_head_print;
-#else
-#define ahd_complete_scb_dmainprog_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD", 0x12a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_dma_scb_head_print;
-#else
-#define ahd_complete_dma_scb_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD", 0x12c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_dma_scb_tail_print;
-#else
-#define ahd_complete_dma_scb_tail_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL", 0x12e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_complete_on_qfreeze_head_print;
-#else
-#define ahd_complete_on_qfreeze_head_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD", 0x130, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qfreeze_count_print;
-#else
-#define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QFREEZE_COUNT", 0x132, regvalue, cur_col, wrap)
+    ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_kernel_qfreeze_count_print;
+ahd_reg_print_t ahd_selid_print;
 #else
-#define ahd_kernel_qfreeze_count_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT", 0x134, regvalue, cur_col, wrap)
+#define ahd_selid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SELID", 0x49, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_saved_mode_print;
+ahd_reg_print_t ahd_simode0_print;
 #else
-#define ahd_saved_mode_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SAVED_MODE", 0x136, regvalue, cur_col, wrap)
+#define ahd_simode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_msg_out_print;
+ahd_reg_print_t ahd_sstat0_print;
 #else
-#define ahd_msg_out_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MSG_OUT", 0x137, regvalue, cur_col, wrap)
+#define ahd_sstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seq_flags_print;
+ahd_reg_print_t ahd_sstat1_print;
 #else
-#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQ_FLAGS", 0x139, regvalue, cur_col, wrap)
+#define ahd_sstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT1", 0x4c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastphase_print;
+ahd_reg_print_t ahd_sstat2_print;
 #else
-#define ahd_lastphase_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LASTPHASE", 0x13c, regvalue, cur_col, wrap)
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qoutfifo_entry_valid_tag_print;
+ahd_reg_print_t ahd_perrdiag_print;
 #else
-#define ahd_qoutfifo_entry_valid_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG", 0x13d, regvalue, cur_col, wrap)
+#define ahd_perrdiag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_kernel_tqinpos_print;
+ahd_reg_print_t ahd_soffcnt_print;
 #else
-#define ahd_kernel_tqinpos_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "KERNEL_TQINPOS", 0x13e, regvalue, cur_col, wrap)
+#define ahd_soffcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qoutfifo_next_addr_print;
+ahd_reg_print_t ahd_lqistat0_print;
 #else
-#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR", 0x144, regvalue, cur_col, wrap)
+#define ahd_lqistat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_last_msg_print;
+ahd_reg_print_t ahd_lqistat1_print;
 #else
-#define ahd_last_msg_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LAST_MSG", 0x14a, regvalue, cur_col, wrap)
+#define ahd_lqistat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsiseq_template_print;
+ahd_reg_print_t ahd_lqistat2_print;
 #else
-#define ahd_scsiseq_template_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x14b, regvalue, cur_col, wrap)
+#define ahd_lqistat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT2", 0x52, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_initiator_tag_print;
+ahd_reg_print_t ahd_sstat3_print;
 #else
-#define ahd_initiator_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INITIATOR_TAG", 0x14c, regvalue, cur_col, wrap)
+#define ahd_sstat3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seq_flags2_print;
+ahd_reg_print_t ahd_lqostat0_print;
 #else
-#define ahd_seq_flags2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x14d, regvalue, cur_col, wrap)
+#define ahd_lqostat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_allocfifo_scbptr_print;
+ahd_reg_print_t ahd_lqostat1_print;
 #else
-#define ahd_allocfifo_scbptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR", 0x14e, regvalue, cur_col, wrap)
+#define ahd_lqostat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_timer_print;
+ahd_reg_print_t ahd_lqostat2_print;
 #else
-#define ahd_int_coalescing_timer_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_TIMER", 0x150, regvalue, cur_col, wrap)
+#define ahd_lqostat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_maxcmds_print;
+ahd_reg_print_t ahd_simode1_print;
 #else
-#define ahd_int_coalescing_maxcmds_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS", 0x152, regvalue, cur_col, wrap)
+#define ahd_simode1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_mincmds_print;
+ahd_reg_print_t ahd_dffsxfrctl_print;
 #else
-#define ahd_int_coalescing_mincmds_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS", 0x153, regvalue, cur_col, wrap)
+#define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmds_pending_print;
+ahd_reg_print_t ahd_seqintsrc_print;
 #else
-#define ahd_cmds_pending_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDS_PENDING", 0x154, regvalue, cur_col, wrap)
+#define ahd_seqintsrc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_int_coalescing_cmdcount_print;
+ahd_reg_print_t ahd_seqimode_print;
 #else
-#define ahd_int_coalescing_cmdcount_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT", 0x156, regvalue, cur_col, wrap)
+#define ahd_seqimode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_local_hs_mailbox_print;
+ahd_reg_print_t ahd_mdffstat_print;
 #else
-#define ahd_local_hs_mailbox_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX", 0x157, regvalue, cur_col, wrap)
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdsize_table_print;
+ahd_reg_print_t ahd_seloid_print;
 #else
-#define ahd_cmdsize_table_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x158, regvalue, cur_col, wrap)
+#define ahd_seloid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mk_message_scb_print;
+ahd_reg_print_t ahd_sg_state_print;
 #else
-#define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 0x160, regvalue, cur_col, wrap)
+#define ahd_sg_state_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mk_message_scsiid_print;
+ahd_reg_print_t ahd_ccscbctl_print;
 #else
-#define ahd_mk_message_scsiid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", 0x162, regvalue, cur_col, wrap)
+#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBCTL", 0xad, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_base_print;
+ahd_reg_print_t ahd_ccsgctl_print;
 #else
-#define ahd_scb_base_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#define ahd_ccsgctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
+ahd_reg_print_t ahd_seqctl0_print;
 #else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_sense_busaddr_print;
+ahd_reg_print_t ahd_seqintctl_print;
 #else
-#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 0x18c, regvalue, cur_col, wrap)
+#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_tag_print;
+ahd_reg_print_t ahd_sram_base_print;
 #else
-#define ahd_scb_tag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TAG", 0x190, regvalue, cur_col, wrap)
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_control_print;
+ahd_reg_print_t ahd_qfreeze_count_print;
 #else
-#define ahd_scb_control_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_CONTROL", 0x192, regvalue, cur_col, wrap)
+#define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QFREEZE_COUNT", 0x132, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_scsiid_print;
+ahd_reg_print_t ahd_kernel_qfreeze_count_print;
 #else
-#define ahd_scb_scsiid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap)
+#define ahd_kernel_qfreeze_count_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT", 0x134, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_lun_print;
+ahd_reg_print_t ahd_saved_mode_print;
 #else
-#define ahd_scb_lun_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_LUN", 0x194, regvalue, cur_col, wrap)
+#define ahd_saved_mode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SAVED_MODE", 0x136, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_task_attribute_print;
+ahd_reg_print_t ahd_seq_flags_print;
 #else
-#define ahd_scb_task_attribute_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TASK_ATTRIBUTE", 0x195, regvalue, cur_col, wrap)
+#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQ_FLAGS", 0x139, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_task_management_print;
+ahd_reg_print_t ahd_lastphase_print;
 #else
-#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT", 0x197, regvalue, cur_col, wrap)
+#define ahd_lastphase_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LASTPHASE", 0x13c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_dataptr_print;
+ahd_reg_print_t ahd_seq_flags2_print;
 #else
-#define ahd_scb_dataptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DATAPTR", 0x198, regvalue, cur_col, wrap)
+#define ahd_seq_flags2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x14d, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_datacnt_print;
+ahd_reg_print_t ahd_mk_message_scb_print;
 #else
-#define ahd_scb_datacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DATACNT", 0x1a0, regvalue, cur_col, wrap)
+#define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 0x160, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_sgptr_print;
+ahd_reg_print_t ahd_mk_message_scsiid_print;
 #else
-#define ahd_scb_sgptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SGPTR", 0x1a4, regvalue, cur_col, wrap)
+#define ahd_mk_message_scsiid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", 0x162, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_busaddr_print;
+ahd_reg_print_t ahd_scb_base_print;
 #else
-#define ahd_scb_busaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_BUSADDR", 0x1a8, regvalue, cur_col, wrap)
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_next2_print;
+ahd_reg_print_t ahd_scb_control_print;
 #else
-#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap)
+#define ahd_scb_control_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_CONTROL", 0x192, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_disconnected_lists_print;
+ahd_reg_print_t ahd_scb_scsiid_print;
 #else
-#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 0x1b8, regvalue, cur_col, wrap)
+#define ahd_scb_scsiid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap)
 #endif
 
 
@@ -1292,15 +417,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRCMDINT       	0x02
 #define		CLRSPLTINT      	0x01
 
-#define	ERROR           		0x04
-#define		CIOPARERR       	0x80
-#define		CIOACCESFAIL    	0x40
-#define		MPARERR         	0x20
-#define		DPARERR         	0x10
-#define		SQPARERR        	0x08
-#define		ILLOPCODE       	0x04
-#define		DSCTMOUT        	0x02
-
 #define	CLRERR          		0x04
 #define		CLRCIOPARERR    	0x80
 #define		CLRCIOACCESFAIL 	0x40
@@ -1310,6 +426,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRILLOPCODE    	0x04
 #define		CLRDSCTMOUT     	0x02
 
+#define	ERROR           		0x04
+#define		CIOPARERR       	0x80
+#define		CIOACCESFAIL    	0x40
+#define		MPARERR         	0x20
+#define		DPARERR         	0x10
+#define		SQPARERR        	0x08
+#define		ILLOPCODE       	0x04
+#define		DSCTMOUT        	0x02
+
 #define	HCNTRL          		0x05
 #define		SEQ_RESET       	0x80
 #define		POWRDN          	0x40
@@ -1404,22 +529,22 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		FIFOFULL        	0x02
 #define		FIFOEMP         	0x01
 
-#define	SG_CACHE_SHADOW 		0x1b
-#define		ODD_SEG         	0x04
-#define		LAST_SEG        	0x02
-#define		LAST_SEG_DONE   	0x01
-
 #define	ARBCTL          		0x1b
 #define		RESET_HARB      	0x80
 #define		RETRY_SWEN      	0x08
 #define		USE_TIME        	0x07
 
-#define	SG_CACHE_PRE    		0x1b
+#define	SG_CACHE_SHADOW 		0x1b
+#define		ODD_SEG         	0x04
+#define		LAST_SEG        	0x02
+#define		LAST_SEG_DONE   	0x01
 
-#define	LQIN            		0x20
+#define	SG_CACHE_PRE    		0x1b
 
 #define	TYPEPTR         		0x20
 
+#define	LQIN            		0x20
+
 #define	TAGPTR          		0x21
 
 #define	LUNPTR          		0x22
@@ -1479,14 +604,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		SINGLECMD       	0x02
 #define		ABORTPENDING    	0x01
 
-#define	SCSBIST0        		0x39
-#define		GSBISTERR       	0x40
-#define		GSBISTDONE      	0x20
-#define		GSBISTRUN       	0x10
-#define		OSBISTERR       	0x04
-#define		OSBISTDONE      	0x02
-#define		OSBISTRUN       	0x01
-
 #define	LQCTL2          		0x39
 #define		LQIRETRY        	0x80
 #define		LQICONTINUE     	0x40
@@ -1497,10 +614,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQOTOIDLE       	0x02
 #define		LQOPAUSE        	0x01
 
-#define	SCSBIST1        		0x3a
-#define		NTBISTERR       	0x04
-#define		NTBISTDONE      	0x02
-#define		NTBISTRUN       	0x01
+#define	SCSBIST0        		0x39
+#define		GSBISTERR       	0x40
+#define		GSBISTDONE      	0x20
+#define		GSBISTRUN       	0x10
+#define		OSBISTERR       	0x04
+#define		OSBISTDONE      	0x02
+#define		OSBISTRUN       	0x01
 
 #define	SCSISEQ0        		0x3a
 #define		TEMODEO         	0x80
@@ -1509,8 +629,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		FORCEBUSFREE    	0x10
 #define		SCSIRSTO        	0x01
 
+#define	SCSBIST1        		0x3a
+#define		NTBISTERR       	0x04
+#define		NTBISTDONE      	0x02
+#define		NTBISTRUN       	0x01
+
 #define	SCSISEQ1        		0x3b
 
+#define	BUSINITID       		0x3c
+
 #define	SXFRCTL0        		0x3c
 #define		DFON            	0x80
 #define		DFPEXP          	0x40
@@ -1519,8 +646,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	DLCOUNT         		0x3c
 
-#define	BUSINITID       		0x3c
-
 #define	SXFRCTL1        		0x3d
 #define		BITBUCKET       	0x80
 #define		ENSACHK         	0x40
@@ -1545,6 +670,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CURRFIFO_1      	0x01
 #define		CURRFIFO_0      	0x00
 
+#define	MULTARGID       		0x40
+
 #define	SCSISIGO        		0x40
 #define		CDO             	0x80
 #define		IOO             	0x40
@@ -1555,8 +682,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		REQO            	0x02
 #define		ACKO            	0x01
 
-#define	MULTARGID       		0x40
-
 #define	SCSISIGI        		0x41
 #define		ATNI            	0x10
 #define		SELI            	0x08
@@ -1603,14 +728,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENAB20          	0x04
 #define		SELWIDE         	0x02
 
-#define	CLRSINT0        		0x4b
-#define		CLRSELDO        	0x40
-#define		CLRSELDI        	0x20
-#define		CLRSELINGO      	0x10
-#define		CLRIOERR        	0x08
-#define		CLROVERRUN      	0x04
-#define		CLRSPIORDY      	0x02
-#define		CLRARBDO        	0x01
+#define	SIMODE0         		0x4b
+#define		ENSELDO         	0x40
+#define		ENSELDI         	0x20
+#define		ENSELINGO       	0x10
+#define		ENIOERR         	0x08
+#define		ENOVERRUN       	0x04
+#define		ENSPIORDY       	0x02
+#define		ENARBDO         	0x01
 
 #define	SSTAT0          		0x4b
 #define		TARGET          	0x80
@@ -1622,23 +747,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		SPIORDY         	0x02
 #define		ARBDO           	0x01
 
-#define	SIMODE0         		0x4b
-#define		ENSELDO         	0x40
-#define		ENSELDI         	0x20
-#define		ENSELINGO       	0x10
-#define		ENIOERR         	0x08
-#define		ENOVERRUN       	0x04
-#define		ENSPIORDY       	0x02
-#define		ENARBDO         	0x01
-
-#define	CLRSINT1        		0x4c
-#define		CLRSELTIMEO     	0x80
-#define		CLRATNO         	0x40
-#define		CLRSCSIRSTI     	0x20
-#define		CLRBUSFREE      	0x08
-#define		CLRSCSIPERR     	0x04
-#define		CLRSTRB2FAST    	0x02
-#define		CLRREQINIT      	0x01
+#define	CLRSINT0        		0x4b
+#define		CLRSELDO        	0x40
+#define		CLRSELDI        	0x20
+#define		CLRSELINGO      	0x10
+#define		CLRIOERR        	0x08
+#define		CLROVERRUN      	0x04
+#define		CLRSPIORDY      	0x02
+#define		CLRARBDO        	0x01
 
 #define	SSTAT1          		0x4c
 #define		SELTO           	0x80
@@ -1650,6 +766,20 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		STRB2FAST       	0x02
 #define		REQINIT         	0x01
 
+#define	CLRSINT1        		0x4c
+#define		CLRSELTIMEO     	0x80
+#define		CLRATNO         	0x40
+#define		CLRSCSIRSTI     	0x20
+#define		CLRBUSFREE      	0x08
+#define		CLRSCSIPERR     	0x04
+#define		CLRSTRB2FAST    	0x02
+#define		CLRREQINIT      	0x01
+
+#define	SIMODE2         		0x4d
+#define		ENWIDE_RES      	0x04
+#define		ENSDONE         	0x02
+#define		ENDMADONE       	0x01
+
 #define	SSTAT2          		0x4d
 #define		BUSFREETIME     	0xc0
 #define		NONPACKREQ      	0x20
@@ -1662,11 +792,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		BUSFREE_DFF0    	0x80
 #define		BUSFREE_LQO     	0x40
 
-#define	SIMODE2         		0x4d
-#define		ENWIDE_RES      	0x04
-#define		ENSDONE         	0x02
-#define		ENDMADONE       	0x01
-
 #define	CLRSINT2        		0x4d
 #define		CLRNONPACKREQ   	0x20
 #define		CLRWIDE_RES     	0x04
@@ -1685,10 +810,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	LQISTATE        		0x4e
 
-#define	SOFFCNT         		0x4f
-
 #define	LQOSTATE        		0x4f
 
+#define	SOFFCNT         		0x4f
+
 #define	LQISTAT0        		0x50
 #define		LQIATNQAS       	0x20
 #define		LQICRCT1        	0x10
@@ -1697,14 +822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQIATNLQ        	0x02
 #define		LQIATNCMD       	0x01
 
-#define	CLRLQIINT0      		0x50
-#define		CLRLQIATNQAS    	0x20
-#define		CLRLQICRCT1     	0x10
-#define		CLRLQICRCT2     	0x08
-#define		CLRLQIBADLQT    	0x04
-#define		CLRLQIATNLQ     	0x02
-#define		CLRLQIATNCMD    	0x01
-
 #define	LQIMODE0        		0x50
 #define		ENLQIATNQASK    	0x20
 #define		ENLQICRCT1      	0x10
@@ -1713,6 +830,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENLQIATNLQ      	0x02
 #define		ENLQIATNCMD     	0x01
 
+#define	CLRLQIINT0      		0x50
+#define		CLRLQIATNQAS    	0x20
+#define		CLRLQICRCT1     	0x10
+#define		CLRLQICRCT2     	0x08
+#define		CLRLQIBADLQT    	0x04
+#define		CLRLQIATNLQ     	0x02
+#define		CLRLQIATNCMD    	0x01
+
 #define	LQIMODE1        		0x51
 #define		ENLQIPHASE_LQ   	0x80
 #define		ENLQIPHASE_NLQ  	0x40
@@ -1753,25 +878,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		LQISTOPCMD      	0x02
 #define		LQIGSAVAIL      	0x01
 
-#define	SSTAT3          		0x53
-#define		NTRAMPERR       	0x02
-#define		OSRAMPERR       	0x01
-
 #define	SIMODE3         		0x53
 #define		ENNTRAMPERR     	0x02
 #define		ENOSRAMPERR     	0x01
 
+#define	SSTAT3          		0x53
+#define		NTRAMPERR       	0x02
+#define		OSRAMPERR       	0x01
+
 #define	CLRSINT3        		0x53
 #define		CLRNTRAMPERR    	0x02
 #define		CLROSRAMPERR    	0x01
 
-#define	LQOSTAT0        		0x54
-#define		LQOTARGSCBPERR  	0x10
-#define		LQOSTOPT2       	0x08
-#define		LQOATNLQ        	0x04
-#define		LQOATNPKT       	0x02
-#define		LQOTCRC         	0x01
-
 #define	CLRLQOINT0      		0x54
 #define		CLRLQOTARGSCBPERR	0x10
 #define		CLRLQOSTOPT2    	0x08
@@ -1779,6 +897,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRLQOATNPKT    	0x02
 #define		CLRLQOTCRC      	0x01
 
+#define	LQOSTAT0        		0x54
+#define		LQOTARGSCBPERR  	0x10
+#define		LQOSTOPT2       	0x08
+#define		LQOATNLQ        	0x04
+#define		LQOATNPKT       	0x02
+#define		LQOTCRC         	0x01
+
 #define	LQOMODE0        		0x54
 #define		ENLQOTARGSCBPERR	0x10
 #define		ENLQOSTOPT2     	0x08
@@ -1793,13 +918,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENLQOBUSFREE    	0x02
 #define		ENLQOPHACHGINPKT	0x01
 
-#define	LQOSTAT1        		0x55
-#define		LQOINITSCBPERR  	0x10
-#define		LQOSTOPI2       	0x08
-#define		LQOBADQAS       	0x04
-#define		LQOBUSFREE      	0x02
-#define		LQOPHACHGINPKT  	0x01
-
 #define	CLRLQOINT1      		0x55
 #define		CLRLQOINITSCBPERR	0x10
 #define		CLRLQOSTOPI2    	0x08
@@ -1807,6 +925,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRLQOBUSFREE   	0x02
 #define		CLRLQOPHACHGINPKT	0x01
 
+#define	LQOSTAT1        		0x55
+#define		LQOINITSCBPERR  	0x10
+#define		LQOSTOPI2       	0x08
+#define		LQOBADQAS       	0x04
+#define		LQOBUSFREE      	0x02
+#define		LQOPHACHGINPKT  	0x01
+
 #define	LQOSTAT2        		0x56
 #define		LQOPKT          	0xe0
 #define		LQOWAITFIFO     	0x10
@@ -1859,8 +984,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CFG4ICMD        	0x02
 #define		CFG4TCMD        	0x01
 
-#define	CURRSCB         		0x5c
-
 #define	SEQIMODE        		0x5c
 #define		ENCTXTDONE      	0x40
 #define		ENSAVEPTRS      	0x20
@@ -1870,6 +993,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		ENCFG4ICMD      	0x02
 #define		ENCFG4TCMD      	0x01
 
+#define	CURRSCB         		0x5c
+
+#define	CRCCONTROL      		0x5d
+#define		CRCVALCHKEN     	0x40
+
 #define	MDFFSTAT        		0x5d
 #define		SHCNTNEGATIVE   	0x40
 #define		SHCNTMINUS1     	0x20
@@ -1879,34 +1007,31 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DATAINFIFO      	0x02
 #define		FIFOFREE        	0x01
 
-#define	CRCCONTROL      		0x5d
-#define		CRCVALCHKEN     	0x40
-
 #define	DFFTAG          		0x5e
 
-#define	LASTSCB         		0x5e
-
 #define	SCSITEST        		0x5e
 #define		CNTRTEST        	0x08
 #define		SEL_TXPLL_DEBUG 	0x04
 
+#define	LASTSCB         		0x5e
+
 #define	IOPDNCTL        		0x5f
 #define		DISABLE_OE      	0x80
 #define		PDN_IDIST       	0x04
 #define		PDN_DIFFSENSE   	0x01
 
-#define	SHADDR          		0x60
+#define	DGRPCRCI        		0x60
 
 #define	NEGOADDR        		0x60
 
-#define	DGRPCRCI        		0x60
+#define	SHADDR          		0x60
 
 #define	NEGPERIOD       		0x61
 
-#define	PACKCRCI        		0x62
-
 #define	NEGOFFSET       		0x62
 
+#define	PACKCRCI        		0x62
+
 #define	NEGPPROPTS      		0x63
 #define		PPROPT_PACE     	0x08
 #define		PPROPT_QAS      	0x04
@@ -1942,16 +1067,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	SHCNT           		0x68
 
-#define	TOWNID          		0x69
-
 #define	PLL960CTL1      		0x69
 
+#define	TOWNID          		0x69
+
 #define	PLL960CNT0      		0x6a
 
 #define	XSIG            		0x6a
 
 #define	SELOID          		0x6b
 
+#define	FAIRNESS        		0x6c
+
 #define	PLL400CTL0      		0x6c
 #define		PLL_VCOSEL      	0x80
 #define		PLL_PWDN        	0x40
@@ -1961,8 +1088,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		PLL_DLPF        	0x02
 #define		PLL_ENFBM       	0x01
 
-#define	FAIRNESS        		0x6c
-
 #define	PLL400CTL1      		0x6d
 #define		PLL_CNTEN       	0x80
 #define		PLL_CNTCLR      	0x40
@@ -1974,25 +1099,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	HADDR           		0x70
 
+#define	HODMAADR        		0x70
+
 #define	PLLDELAY        		0x70
 #define		SPLIT_DROP_REQ  	0x80
 
-#define	HODMAADR        		0x70
+#define	HCNT            		0x78
 
 #define	HODMACNT        		0x78
 
-#define	HCNT            		0x78
-
 #define	HODMAEN         		0x7a
 
-#define	SCBHADDR        		0x7c
-
 #define	SGHADDR         		0x7c
 
-#define	SCBHCNT         		0x84
+#define	SCBHADDR        		0x7c
 
 #define	SGHCNT          		0x84
 
+#define	SCBHCNT         		0x84
+
 #define	DFF_THRSH       		0x88
 #define		WR_DFTHRSH      	0x70
 #define		RD_DFTHRSH      	0x07
@@ -2025,6 +1150,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	CMCRXMSG0       		0x90
 
+#define	OVLYRXMSG0      		0x90
+
+#define	DCHRXMSG0       		0x90
+
 #define	ROENABLE        		0x90
 #define		MSIROEN         	0x20
 #define		OVLYROEN        	0x10
@@ -2033,11 +1162,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DCH1ROEN        	0x02
 #define		DCH0ROEN        	0x01
 
-#define	OVLYRXMSG0      		0x90
+#define	OVLYRXMSG1      		0x91
 
-#define	DCHRXMSG0       		0x90
+#define	CMCRXMSG1       		0x91
 
-#define	OVLYRXMSG1      		0x91
+#define	DCHRXMSG1       		0x91
 
 #define	NSENABLE        		0x91
 #define		MSINSEN         	0x20
@@ -2047,10 +1176,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		DCH1NSEN        	0x02
 #define		DCH0NSEN        	0x01
 
-#define	CMCRXMSG1       		0x91
-
-#define	DCHRXMSG1       		0x91
-
 #define	DCHRXMSG2       		0x92
 
 #define	CMCRXMSG2       		0x92
@@ -2074,24 +1199,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		TSCSERREN       	0x02
 #define		CMPABCDIS       	0x01
 
+#define	CMCSEQBCNT      		0x94
+
 #define	OVLYSEQBCNT     		0x94
 
 #define	DCHSEQBCNT      		0x94
 
-#define	CMCSEQBCNT      		0x94
-
-#define	CMCSPLTSTAT0    		0x96
-
 #define	DCHSPLTSTAT0    		0x96
 
 #define	OVLYSPLTSTAT0   		0x96
 
-#define	CMCSPLTSTAT1    		0x97
+#define	CMCSPLTSTAT0    		0x96
 
 #define	OVLYSPLTSTAT1   		0x97
 
 #define	DCHSPLTSTAT1    		0x97
 
+#define	CMCSPLTSTAT1    		0x97
+
 #define	SGRXMSG0        		0x98
 #define		CDNUM           	0xf8
 #define		CFNUM           	0x07
@@ -2119,18 +1244,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		TAG_NUM         	0x1f
 #define		RLXORD          	0x10
 
-#define	SGSEQBCNT       		0x9c
-
 #define	SLVSPLTOUTATTR0 		0x9c
 #define		LOWER_BCNT      	0xff
 
+#define	SGSEQBCNT       		0x9c
+
 #define	SLVSPLTOUTATTR1 		0x9d
 #define		CMPLT_DNUM      	0xf8
 #define		CMPLT_FNUM      	0x07
 
-#define	SLVSPLTOUTATTR2 		0x9e
-#define		CMPLT_BNUM      	0xff
-
 #define	SGSPLTSTAT0     		0x9e
 #define		STAETERM        	0x80
 #define		SCBCERR         	0x40
@@ -2141,6 +1263,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		RXSCEMSG        	0x02
 #define		RXSPLTRSP       	0x01
 
+#define	SLVSPLTOUTATTR2 		0x9e
+#define		CMPLT_BNUM      	0xff
+
 #define	SGSPLTSTAT1     		0x9f
 #define		RXDATABUCKET    	0x01
 
@@ -2177,14 +1302,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		CLRPENDMSI      	0x08
 #define		DPR             	0x01
 
+#define	DATA_COUNT_ODD  		0xa7
+
 #define	TARGPCISTAT     		0xa7
 #define		DPE             	0x80
 #define		SSE             	0x40
 #define		STA             	0x08
 #define		TWATERR         	0x02
 
-#define	DATA_COUNT_ODD  		0xa7
-
 #define	SCBPTR          		0xa8
 
 #define	CCSCBACNT       		0xab
@@ -2196,10 +1321,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	CCSGADDR        		0xac
 
-#define	CCSCBADR_BK     		0xac
-
 #define	CCSCBADDR       		0xac
 
+#define	CCSCBADR_BK     		0xac
+
 #define	CMC_RAMBIST     		0xad
 #define		SG_ELEMENT_SIZE 	0x80
 #define		SCBRAMBIST_FAIL 	0x40
@@ -2253,9 +1378,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define	SEEDAT          		0xbc
 
 #define	SEECTL          		0xbe
+#define		SEEOP_EWDS      	0x40
 #define		SEEOP_WALL      	0x40
 #define		SEEOP_EWEN      	0x40
-#define		SEEOP_EWDS      	0x40
 #define		SEEOPCODE       	0x70
 #define		SEERST          	0x02
 #define		SEESTART        	0x01
@@ -2272,25 +1397,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	SCBCNT          		0xbf
 
-#define	DFWADDR         		0xc0
-
 #define	DSPFLTRCTL      		0xc0
 #define		FLTRDISABLE     	0x20
 #define		EDGESENSE       	0x10
 #define		DSPFCNTSEL      	0x0f
 
+#define	DFWADDR         		0xc0
+
 #define	DSPDATACTL      		0xc1
 #define		BYPASSENAB      	0x80
 #define		DESQDIS         	0x10
 #define		RCVROFFSTDIS    	0x04
 #define		XMITOFFSTDIS    	0x02
 
-#define	DFRADDR         		0xc2
-
 #define	DSPREQCTL       		0xc2
 #define		MANREQCTL       	0xc0
 #define		MANREQDLY       	0x3f
 
+#define	DFRADDR         		0xc2
+
 #define	DSPACKCTL       		0xc3
 #define		MANACKCTL       	0xc0
 #define		MANACKDLY       	0x3f
@@ -2311,14 +1436,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	WRTBIASCALC     		0xc7
 
-#define	RCVRBIASCALC    		0xc8
-
 #define	DFPTRS          		0xc8
 
-#define	SKEWCALC        		0xc9
+#define	RCVRBIASCALC    		0xc8
 
 #define	DFBKPTR         		0xc9
 
+#define	SKEWCALC        		0xc9
+
 #define	DFDBCTL         		0xcb
 #define		DFF_CIO_WR_RDY  	0x20
 #define		DFF_CIO_RD_RDY  	0x10
@@ -2403,12 +1528,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	ACCUM_SAVE      		0xfa
 
-#define	WAITING_SCB_TAILS		0x100
-
 #define	AHD_PCI_CONFIG_BASE		0x100
 
 #define	SRAM_BASE       		0x100
 
+#define	WAITING_SCB_TAILS		0x100
+
 #define	WAITING_TID_HEAD		0x120
 
 #define	WAITING_TID_TAIL		0x122
@@ -2437,8 +1562,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define		PRELOADEN       	0x80
 #define		WIDEODD         	0x40
 #define		SCSIEN          	0x20
-#define		SDMAEN          	0x10
 #define		SDMAENACK       	0x10
+#define		SDMAEN          	0x10
 #define		HDMAEN          	0x08
 #define		HDMAENACK       	0x08
 #define		DIRECTION       	0x04
@@ -2536,12 +1661,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define	MK_MESSAGE_SCSIID		0x162
 
-#define	SCB_BASE        		0x180
-
 #define	SCB_RESIDUAL_DATACNT		0x180
 #define	SCB_CDB_STORE   		0x180
 #define	SCB_HOST_CDB_PTR		0x180
 
+#define	SCB_BASE        		0x180
+
 #define	SCB_RESIDUAL_SGPTR		0x184
 #define		SG_ADDR_MASK    	0xf8
 #define		SG_OVERRUN_RESID	0x02
@@ -2609,77 +1734,77 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define	SCB_DISCONNECTED_LISTS		0x1b8
 
 
+#define	STIMESEL_SHIFT	0x03
+#define	STIMESEL_MIN	0x18
+#define	INVALID_ADDR	0x80
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	AHD_PRECOMP_MASK	0x07
+#define	TARGET_DATA_IN	0x01
+#define	SEEOP_EWEN_ADDR	0xc0
+#define	NUMDSPS 	0x14
+#define	DST_MODE_SHIFT	0x04
+#define	CCSCBADDR_MAX	0x80
+#define	AHD_ANNEXCOL_PER_DEV0	0x04
+#define	TARGET_CMD_CMPLT	0xfe
+#define	SEEOP_WRAL_ADDR	0x40
+#define	BUS_8_BIT	0x00
 #define	AHD_TIMER_MAX_US	0x18ffe7
 #define	AHD_TIMER_MAX_TICKS	0xffff
 #define	AHD_SENSE_BUFSIZE	0x100
-#define	BUS_8_BIT	0x00
-#define	TARGET_CMD_CMPLT	0xfe
-#define	SEEOP_WRAL_ADDR	0x40
-#define	AHD_AMPLITUDE_DEF	0x07
-#define	AHD_PRECOMP_CUTBACK_37	0x07
 #define	AHD_PRECOMP_SHIFT	0x00
+#define	AHD_PRECOMP_CUTBACK_37	0x07
 #define	AHD_ANNEXCOL_PRECOMP_SLEW	0x04
-#define	AHD_TIMER_US_PER_TICK	0x19
-#define	SCB_TRANSFER_SIZE_FULL_LUN	0x38
+#define	AHD_AMPLITUDE_DEF	0x07
+#define	WRTBIASCTL_HP_DEFAULT	0x00
+#define	TID_SHIFT	0x04
 #define	STATUS_QUEUE_FULL	0x28
 #define	STATUS_BUSY	0x08
-#define	MAX_OFFSET_NON_PACED	0x7f
+#define	SEEOP_EWDS_ADDR	0x00
+#define	SCB_TRANSFER_SIZE_FULL_LUN	0x38
+#define	MK_MESSAGE_BIT_OFFSET	0x04
 #define	MAX_OFFSET_PACED	0xfe
-#define	BUS_32_BIT	0x02
+#define	MAX_OFFSET_NON_PACED	0x7f
+#define	LUNLEN_SINGLE_LEVEL_LUN	0x0f
 #define	CCSGADDR_MAX	0x80
-#define	TID_SHIFT	0x04
-#define	MK_MESSAGE_BIT_OFFSET	0x04
-#define	WRTBIASCTL_HP_DEFAULT	0x00
-#define	SEEOP_EWDS_ADDR	0x00
-#define	AHD_AMPLITUDE_SHIFT	0x00
-#define	AHD_AMPLITUDE_MASK	0x07
-#define	AHD_ANNEXCOL_AMPLITUDE	0x06
-#define	AHD_SLEWRATE_DEF_REVA	0x08
+#define	B_CURRFIFO_0	0x02
+#define	BUS_32_BIT	0x02
+#define	AHD_TIMER_US_PER_TICK	0x19
 #define	AHD_SLEWRATE_SHIFT	0x03
 #define	AHD_SLEWRATE_MASK	0x78
+#define	AHD_SLEWRATE_DEF_REVA	0x08
 #define	AHD_PRECOMP_CUTBACK_29	0x06
 #define	AHD_NUM_PER_DEV_ANNEXCOLS	0x04
-#define	B_CURRFIFO_0	0x02
-#define	LUNLEN_SINGLE_LEVEL_LUN	0x0f
-#define	NVRAM_SCB_OFFSET	0x2c
+#define	AHD_ANNEXCOL_AMPLITUDE	0x06
+#define	AHD_AMPLITUDE_SHIFT	0x00
+#define	AHD_AMPLITUDE_MASK	0x07
+#define	STIMESEL_BUG_ADJ	0x08
 #define	STATUS_PKT_SENSE	0xff
-#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	SRC_MODE_SHIFT	0x00
+#define	SEEOP_ERAL_ADDR	0x80
+#define	NVRAM_SCB_OFFSET	0x2c
 #define	MAX_OFFSET_PACED_BUG	0x7f
-#define	STIMESEL_BUG_ADJ	0x08
-#define	STIMESEL_MIN	0x18
-#define	STIMESEL_SHIFT	0x03
 #define	CCSGRAM_MAXSEGS	0x10
-#define	INVALID_ADDR	0x80
-#define	SEEOP_ERAL_ADDR	0x80
 #define	AHD_SLEWRATE_DEF_REVB	0x08
 #define	AHD_PRECOMP_CUTBACK_17	0x04
-#define	AHD_PRECOMP_MASK	0x07
-#define	SRC_MODE_SHIFT	0x00
-#define	PKT_OVERRUN_BUFSIZE	0x200
 #define	SCB_TRANSFER_SIZE_1BYTE_LUN	0x30
-#define	TARGET_DATA_IN	0x01
-#define	HOST_MSG	0xff
+#define	PKT_OVERRUN_BUFSIZE	0x200
 #define	MAX_OFFSET	0xfe
+#define	HOST_MSG	0xff
 #define	BUS_16_BIT	0x01
-#define	CCSCBADDR_MAX	0x80
-#define	NUMDSPS 	0x14
-#define	SEEOP_EWEN_ADDR	0xc0
-#define	AHD_ANNEXCOL_PER_DEV0	0x04
-#define	DST_MODE_SHIFT	0x04
 
 
 /* Downloaded Constant Definitions */
+#define	SG_SIZEOF	0x04
+#define	SG_PREFETCH_ALIGN_MASK	0x02
+#define	SG_PREFETCH_CNT_LIMIT	0x01
 #define	CACHELINE_MASK	0x07
 #define	SCB_TRANSFER_SIZE	0x06
 #define	PKT_OVERRUN_BUFOFFSET	0x05
-#define	SG_SIZEOF	0x04
 #define	SG_PREFETCH_ADDR_MASK	0x03
-#define	SG_PREFETCH_ALIGN_MASK	0x02
-#define	SG_PREFETCH_CNT_LIMIT	0x01
 #define	SG_PREFETCH_CNT	0x00
 #define	DOWNLOAD_CONST_COUNT	0x08
 
 
 /* Exported Labels */
-#define	LABEL_seq_isr 	0x28f
 #define	LABEL_timer_isr	0x28b
+#define	LABEL_seq_isr 	0x28f
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index f5ea715d6ac33af433688a62669506ebb67303cf..2e0c58905b9e89cedd9be9918d19e459e774b6bf 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -234,6 +234,23 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 	    0x49, regvalue, cur_col, wrap));
 }
 
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+	{ "ENARBDO",		0x01, 0x01 },
+	{ "ENSPIORDY",		0x02, 0x02 },
+	{ "ENOVERRUN",		0x04, 0x04 },
+	{ "ENIOERR",		0x08, 0x08 },
+	{ "ENSELINGO",		0x10, 0x10 },
+	{ "ENSELDI",		0x20, 0x20 },
+	{ "ENSELDO",		0x40, 0x40 }
+};
+
+int
+ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
+	    0x4b, regvalue, cur_col, wrap));
+}
+
 static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
 	{ "ARBDO",		0x01, 0x01 },
 	{ "SPIORDY",		0x02, 0x02 },
@@ -252,23 +269,6 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
 	    0x4b, regvalue, cur_col, wrap));
 }
 
-static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
-	{ "ENARBDO",		0x01, 0x01 },
-	{ "ENSPIORDY",		0x02, 0x02 },
-	{ "ENOVERRUN",		0x04, 0x04 },
-	{ "ENIOERR",		0x08, 0x08 },
-	{ "ENSELINGO",		0x10, 0x10 },
-	{ "ENSELDI",		0x20, 0x20 },
-	{ "ENSELDO",		0x40, 0x40 }
-};
-
-int
-ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-	return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
-	    0x4b, regvalue, cur_col, wrap));
-}
-
 static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
 	{ "REQINIT",		0x01, 0x01 },
 	{ "STRB2FAST",		0x02, 0x02 },
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64ab9eaec428c53f853ae4ef5e8e749f3ac6c188..381846164003129458d4bab4020587f4276c67f6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -7340,7 +7340,6 @@ ahc_dump_card_state(struct ahc_softc *ahc)
 		printk("\n");
 	}
 
-	ahc_platform_dump_card_state(ahc);
 	printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
 	ahc_outb(ahc, SCBPTR, saved_scbptr);
 	if (paused == 0)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index fc6a83188c1e0fab6a2bb4583eda2a2d6771c619..acd687f4554e7349416cb098d881dbf7d018ec37 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -2329,11 +2329,6 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
 	return (retval);
 }
 
-void
-ahc_platform_dump_card_state(struct ahc_softc *ahc)
-{
-}
-
 static void ahc_linux_set_width(struct scsi_target *starget, int width)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 54c702864103a418b176a5eabba6cbafc8730613..f8489078f003a4ffd0a6c9752b033a8e0b184198 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -688,7 +688,6 @@ void	ahc_done(struct ahc_softc*, struct scb*);
 void	ahc_send_async(struct ahc_softc *, char channel,
 		       u_int target, u_int lun, ac_code);
 void	ahc_print_path(struct ahc_softc *, struct scb *);
-void	ahc_platform_dump_card_state(struct ahc_softc *ahc);
 
 #ifdef CONFIG_PCI
 #define AHC_PCI_CONFIG 1
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index e821082a4f47a23db505a829b47c1cfe47eafe26..473039df0ed5c887d67a8f5a9095f63f983d1cbf 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -244,8 +244,6 @@ ahc_reg_print_t ahc_scb_tag_print;
 
 #define	SCSIDATH        		0x07
 
-#define	STCNT           		0x08
-
 #define	OPTIONMODE      		0x08
 #define		OPTIONMODE_DEFAULTS	0x03
 #define		AUTORATEEN      	0x80
@@ -257,6 +255,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		AUTO_MSGOUT_DE  	0x02
 #define		DIS_MSGIN_DUALEDGE	0x01
 
+#define	STCNT           		0x08
+
 #define	TARGCRCCNT      		0x0a
 
 #define	CLRSINT0        		0x0b
@@ -365,8 +365,6 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		ALTSTIM         	0x20
 #define		DFLTTID         	0x10
 
-#define	TARGID          		0x1b
-
 #define	SPIOCAP         		0x1b
 #define		SOFT1           	0x80
 #define		SOFT0           	0x40
@@ -377,12 +375,14 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		ROM             	0x02
 #define		SSPIOCPS        	0x01
 
+#define	TARGID          		0x1b
+
 #define	BRDCTL          		0x1d
 #define		BRDDAT7         	0x80
 #define		BRDDAT6         	0x40
 #define		BRDDAT5         	0x20
-#define		BRDDAT4         	0x10
 #define		BRDSTB          	0x10
+#define		BRDDAT4         	0x10
 #define		BRDDAT3         	0x08
 #define		BRDCS           	0x08
 #define		BRDDAT2         	0x04
@@ -406,8 +406,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define		DIAGLEDEN       	0x80
 #define		DIAGLEDON       	0x40
 #define		AUTOFLUSHDIS    	0x20
-#define		ENAB40          	0x08
 #define		SELBUSB         	0x08
+#define		ENAB40          	0x08
 #define		ENAB20          	0x04
 #define		SELWIDE         	0x02
 #define		XCVR            	0x01
@@ -730,8 +730,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define	SCB_BASE        		0xa0
 
 #define	SCB_CDB_PTR     		0xa0
-#define	SCB_RESIDUAL_DATACNT		0xa0
 #define	SCB_CDB_STORE   		0xa0
+#define	SCB_RESIDUAL_DATACNT		0xa0
 
 #define	SCB_RESIDUAL_SGPTR		0xa4
 
@@ -756,8 +756,8 @@ ahc_reg_print_t ahc_scb_tag_print;
 
 #define	SCB_CONTROL     		0xb8
 #define		SCB_TAG_TYPE    	0x03
-#define		STATUS_RCVD     	0x80
 #define		TARGET_SCB      	0x80
+#define		STATUS_RCVD     	0x80
 #define		DISCENB         	0x40
 #define		TAG_ENB         	0x20
 #define		MK_MESSAGE      	0x10
@@ -872,40 +872,40 @@ ahc_reg_print_t ahc_scb_tag_print;
 #define	SG_CACHE_PRE    		0xfc
 
 
+#define	TARGET_CMD_CMPLT	0xfe
 #define	MAX_OFFSET_ULTRA2	0x7f
 #define	MAX_OFFSET_16BIT	0x08
 #define	BUS_8_BIT	0x00
-#define	TARGET_CMD_CMPLT	0xfe
+#define	TID_SHIFT	0x04
 #define	STATUS_QUEUE_FULL	0x28
 #define	STATUS_BUSY	0x08
-#define	MAX_OFFSET_8BIT	0x0f
-#define	BUS_32_BIT	0x02
-#define	CCSGADDR_MAX	0x80
-#define	TID_SHIFT	0x04
 #define	SCB_DOWNLOAD_SIZE_64	0x30
+#define	MAX_OFFSET_8BIT	0x0f
 #define	HOST_MAILBOX_SHIFT	0x04
-#define	CMD_GROUP_CODE_SHIFT	0x05
-#define	CCSGRAM_MAXSEGS	0x10
-#define	SCB_LIST_NULL	0xff
+#define	CCSGADDR_MAX	0x80
+#define	BUS_32_BIT	0x02
 #define	SG_SIZEOF	0x08
-#define	SCB_DOWNLOAD_SIZE	0x20
 #define	SEQ_MAILBOX_SHIFT	0x00
+#define	SCB_LIST_NULL	0xff
+#define	SCB_DOWNLOAD_SIZE	0x20
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	CCSGRAM_MAXSEGS	0x10
 #define	TARGET_DATA_IN	0x01
-#define	HOST_MSG	0xff
+#define	STACK_SIZE	0x04
+#define	SCB_UPLOAD_SIZE	0x20
 #define	MAX_OFFSET	0x7f
+#define	HOST_MSG	0xff
 #define	BUS_16_BIT	0x01
-#define	SCB_UPLOAD_SIZE	0x20
-#define	STACK_SIZE	0x04
 
 
 /* Downloaded Constant Definitions */
 #define	INVERTED_CACHESIZE_MASK	0x03
-#define	SG_PREFETCH_ADDR_MASK	0x06
 #define	SG_PREFETCH_ALIGN_MASK	0x05
+#define	SG_PREFETCH_ADDR_MASK	0x06
 #define	QOUTFIFO_OFFSET	0x00
 #define	SG_PREFETCH_CNT	0x04
-#define	CACHESIZE_MASK	0x02
 #define	QINFIFO_OFFSET	0x01
+#define	CACHESIZE_MASK	0x02
 #define	DOWNLOAD_CONST_COUNT	0x07
 
 
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index a14ba7a6b81e9150a55b36aecf3e93d0f85f1d66..6c838865ac5a7b32fd853aa36a07611e9809adc3 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -70,7 +70,7 @@ static struct scsi_host_template aic94xx_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler	= sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler	= sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.track_queue_depth	= 1,
@@ -956,11 +956,11 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	return 1;
 }
 
-static ssize_t asd_version_show(struct device_driver *driver, char *buf)
+static ssize_t version_show(struct device_driver *driver, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
 }
-static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
 static int asd_create_driver_attrs(struct device_driver *driver)
 {
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 9c86481f779f7901e3a615f4f24aa100ee4195d5..259d9c20bf258ba76ea456dd7552c729d29ef604 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -190,7 +190,7 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
 	return 1;
 }
 
-static struct bin_attribute arcmsr_sysfs_message_read_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_read_attr = {
 	.attr = {
 		.name = "mu_read",
 		.mode = S_IRUSR ,
@@ -199,7 +199,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
 	.read = arcmsr_sysfs_iop_message_read,
 };
 
-static struct bin_attribute arcmsr_sysfs_message_write_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
 	.attr = {
 		.name = "mu_write",
 		.mode = S_IWUSR,
@@ -208,7 +208,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
 	.write = arcmsr_sysfs_iop_message_write,
 };
 
-static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
+static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
 	.attr = {
 		.name = "mu_clear",
 		.mode = S_IWUSR,
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 12b88294d667dba885c565591560657e554b7cd6..690816f3c6af9a67ff38e801a4e9c440b763f114 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2725,23 +2725,24 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
  * Params   : SCpnt  - command causing reset
  * Returns  : one of SCSI_RESET_ macros
  */
-int acornscsi_bus_reset(struct scsi_cmnd *SCpnt)
+int acornscsi_host_reset(struct Scsi_Host *shpnt)
 {
-	AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
+	AS_Host *host = (AS_Host *)shpnt->hostdata;
 	struct scsi_cmnd *SCptr;
     
     host->stats.resets += 1;
 
 #if (DEBUG & DEBUG_RESET)
     {
-	int asr, ssr;
+	int asr, ssr, devidx;
 
 	asr = sbic_arm_read(host, SBIC_ASR);
 	ssr = sbic_arm_read(host, SBIC_SSR);
 
 	printk(KERN_WARNING "acornscsi_reset: ");
 	print_sbic_status(asr, ssr, host->scsi.phase);
-	acornscsi_dumplog(host, SCpnt->device->id);
+	for (devidx = 0; devidx < 9; devidx++)
+	    acornscsi_dumplog(host, devidx);
     }
 #endif
 
@@ -2884,7 +2885,7 @@ static struct scsi_host_template acornscsi_template = {
 	.info			= acornscsi_info,
 	.queuecommand		= acornscsi_queuecmd,
 	.eh_abort_handler	= acornscsi_abort,
-	.eh_bus_reset_handler	= acornscsi_bus_reset,
+	.eh_host_reset_handler	= acornscsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index a87b99c7fb9a7596810b3a7c4ecdddb4b3ff294f..ae1d809904fb7f63526bfd882443eb87fd5bb7e8 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -216,7 +216,7 @@ static struct scsi_host_template cumanascsi_template = {
 	.info			= cumanascsi_info,
 	.queuecommand		= cumanascsi_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 6be6666534d4f662ff9e32cd8df1538d0c107515..05b7f755499b1dcad1c90cb353d9758299ac15f8 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -105,7 +105,7 @@ static struct scsi_host_template oakscsi_template = {
 	.info			= oakscsi_info,
 	.queuecommand		= oakscsi_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a75feebe6ad6252ad20a89adf7ea0f1b1581defa..89f5154c40b6ec1a61fb259ecd7f20c7441107c3 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -671,7 +671,7 @@ static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
 
 #include "NCR5380.c"
 
-static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
+static int atari_scsi_host_reset(struct scsi_cmnd *cmd)
 {
 	int rv;
 	unsigned long flags;
@@ -688,7 +688,7 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
 		atari_dma_orig_addr = NULL;
 	}
 
-	rv = NCR5380_bus_reset(cmd);
+	rv = NCR5380_host_reset(cmd);
 
 	/* The 5380 raises its IRQ line while _RST is active but the ST DMA
 	 * "lock" has been released so this interrupt may end up handled by
@@ -711,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = {
 	.info			= atari_scsi_info,
 	.queuecommand		= atari_scsi_queue_command,
 	.eh_abort_handler	= atari_scsi_abort,
-	.eh_bus_reset_handler	= atari_scsi_bus_reset,
+	.eh_host_reset_handler	= atari_scsi_host_reset,
 	.this_id		= 7,
 	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 97dca4681784fc4b2cc3cdd57a0587d77a4ab5bc..43a80ce5ce6a9fc9955395f4ed3b556324516626 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -82,8 +82,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 		return NULL;
 	sess = cls_session->dd_data;
 	beiscsi_sess = sess->dd_data;
-	beiscsi_sess->bhs_pool =  pci_pool_create("beiscsi_bhs_pool",
-						   phba->pcidev,
+	beiscsi_sess->bhs_pool =  dma_pool_create("beiscsi_bhs_pool",
+						   &phba->pcidev->dev,
 						   sizeof(struct be_cmd_bhs),
 						   64, 0);
 	if (!beiscsi_sess->bhs_pool)
@@ -108,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
 	struct beiscsi_session *beiscsi_sess = sess->dd_data;
 
 	printk(KERN_INFO "In beiscsi_session_destroy\n");
-	pci_pool_destroy(beiscsi_sess->bhs_pool);
+	dma_pool_destroy(beiscsi_sess->bhs_pool);
 	iscsi_session_teardown(cls_session);
 }
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f862332261f82230a9fec82531c47d3f667f9126..b4542e7e2ad5b77cb910f25554ba98d3663fb1cb 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4257,7 +4257,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
 	if (io_task->cmd_bhs) {
-		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+		dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 			      io_task->bhs_pa.u.a64.address);
 		io_task->cmd_bhs = NULL;
 		task->hdr = NULL;
@@ -4374,7 +4374,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
 	dma_addr_t paddr;
 
-	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
+	io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
 					  GFP_ATOMIC, &paddr);
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
@@ -4501,7 +4501,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	if (io_task->pwrb_handle)
 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 	io_task->pwrb_handle = NULL;
-	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+	dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 		      io_task->bhs_pa.u.a64.address);
 	io_task->cmd_bhs = NULL;
 	return -ENOMEM;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 338dbe0800c16e6d0298636d1d5d45b7e9b56c18..81ce3ffda968e79c19b16f031bb4b8899bf3a448 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -438,7 +438,7 @@ struct beiscsi_hba {
 	 test_bit(BEISCSI_HBA_ONLINE, &phba->state))
 
 struct beiscsi_session {
-	struct pci_pool *bhs_pool;
+	struct dma_pool *bhs_pool;
 };
 
 /**
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7eb0eef18fddfe8fed96f8d7a058146b2c85def3..24e657a4ec80df3caf82eff0b284288acc875a28 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -373,32 +373,28 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 }
 
 /*
- * Scsi_Host template entry, resets the bus and abort all commands.
+ * Scsi_Host template entry, resets the target and aborts all commands.
  */
 static int
-bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
+bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
+	struct scsi_target *starget = scsi_target(cmnd->device);
 	struct bfad_im_port_s *im_port =
 				(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_itnim_s   *itnim;
 	unsigned long   flags;
-	u32        i, rc, err_cnt = 0;
+	u32        rc, rtn = FAILED;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 	enum bfi_tskim_status task_status;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	for (i = 0; i < MAX_FCP_TARGET; i++) {
-		itnim = bfad_get_itnim(im_port, i);
-		if (itnim) {
-			cmnd->SCp.ptr = (char *)&wq;
-			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
-			if (rc != BFA_STATUS_OK) {
-				err_cnt++;
-				continue;
-			}
-
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim) {
+		cmnd->SCp.ptr = (char *)&wq;
+		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
+		if (rc == BFA_STATUS_OK) {
 			/* wait target reset to complete */
 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 			wait_event(wq, test_bit(IO_DONE_BIT,
@@ -406,20 +402,17 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
 			spin_lock_irqsave(&bfad->bfad_lock, flags);
 
 			task_status = cmnd->SCp.Status >> 1;
-			if (task_status != BFI_TSKIM_STS_OK) {
+			if (task_status != BFI_TSKIM_STS_OK)
 				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 					"target reset failure,"
 					" status: %d\n", task_status);
-				err_cnt++;
-			}
+			else
+				rtn = SUCCESS;
 		}
 	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	if (err_cnt)
-		return FAILED;
-
-	return SUCCESS;
+	return rtn;
 }
 
 /*
@@ -816,7 +809,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
-	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
 
 	.slave_alloc = bfad_im_slave_alloc,
 	.slave_configure = bfad_im_slave_configure,
@@ -839,7 +832,7 @@ struct scsi_host_template bfad_im_vport_template = {
 	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
-	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
 
 	.slave_alloc = bfad_im_slave_alloc,
 	.slave_configure = bfad_im_slave_configure,
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 7e007e142aab8888d35e4c9d2021d1ccb514e4b7..901a31632493c1c83d95216e65c14ac14e457f48 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -539,7 +539,6 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
-int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
 void bnx2fc_rport_event_handler(struct fc_lport *lport,
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index dad959fcf6d8e2513369437e1bbd175f582daaad..c535c52e72e53398eab8be83e351454b9540691f 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -105,6 +105,7 @@ do {									\
 static struct class * ch_sysfs_class;
 
 typedef struct {
+	struct kref         ref;
 	struct list_head    list;
 	int                 minor;
 	char                name[8];
@@ -563,13 +564,23 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
 
 /* ------------------------------------------------------------------------ */
 
+static void ch_destroy(struct kref *ref)
+{
+	scsi_changer *ch = container_of(ref, scsi_changer, ref);
+
+	kfree(ch->dt);
+	kfree(ch);
+}
+
 static int
 ch_release(struct inode *inode, struct file *file)
 {
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
+	ch->device = NULL;
 	file->private_data = NULL;
+	kref_put(&ch->ref, ch_destroy);
 	return 0;
 }
 
@@ -588,6 +599,7 @@ ch_open(struct inode *inode, struct file *file)
 		mutex_unlock(&ch_mutex);
 		return -ENXIO;
 	}
+	kref_get(&ch->ref);
 	spin_unlock(&ch_index_lock);
 
 	file->private_data = ch;
@@ -935,8 +947,11 @@ static int ch_probe(struct device *dev)
 	}
 
 	mutex_init(&ch->lock);
+	kref_init(&ch->ref);
 	ch->device = sd;
-	ch_readconfig(ch);
+	ret = ch_readconfig(ch);
+	if (ret)
+		goto destroy_dev;
 	if (init)
 		ch_init_elem(ch);
 
@@ -944,6 +959,8 @@ static int ch_probe(struct device *dev)
 	sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
 
 	return 0;
+destroy_dev:
+	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
 remove_idr:
 	idr_remove(&ch_index_idr, ch->minor);
 free_ch:
@@ -960,8 +977,7 @@ static int ch_remove(struct device *dev)
 	spin_unlock(&ch_index_lock);
 
 	device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
-	kfree(ch->dt);
-	kfree(ch);
+	kref_put(&ch->ref, ch_destroy);
 	return 0;
 }
 
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9acb89538e29f28b9173dcb532cae18c8a54042d..667046419b19d3e87b09f31d0dfde6847b7d50a2 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -465,7 +465,7 @@ struct csio_hw {
 	struct csio_pport	pport[CSIO_MAX_PPORTS];	/* Ports (XGMACs) */
 	struct csio_hw_params	params;			/* Hw parameters */
 
-	struct pci_pool		*scsi_pci_pool;		/* PCI pool for SCSI */
+	struct dma_pool		*scsi_dma_pool;		/* DMA pool for SCSI */
 	mempool_t		*mb_mempool;		/* Mailbox memory pool*/
 	mempool_t		*rnode_mempool;		/* rnode memory pool */
 
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index dcd074169aa9b7794850c7034a1208353ba17266..28a9c7d706cb0ec3d63c2ab641d71fc687e90023 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -485,9 +485,10 @@ csio_resource_alloc(struct csio_hw *hw)
 	if (!hw->rnode_mempool)
 		goto err_free_mb_mempool;
 
-	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
-					    CSIO_SCSI_RSP_LEN, 8, 0);
-	if (!hw->scsi_pci_pool)
+	hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool",
+					    &hw->pdev->dev, CSIO_SCSI_RSP_LEN,
+					    8, 0);
+	if (!hw->scsi_dma_pool)
 		goto err_free_rn_pool;
 
 	return 0;
@@ -505,8 +506,8 @@ csio_resource_alloc(struct csio_hw *hw)
 static void
 csio_resource_free(struct csio_hw *hw)
 {
-	pci_pool_destroy(hw->scsi_pci_pool);
-	hw->scsi_pci_pool = NULL;
+	dma_pool_destroy(hw->scsi_dma_pool);
+	hw->scsi_dma_pool = NULL;
 	mempool_destroy(hw->rnode_mempool);
 	hw->rnode_mempool = NULL;
 	mempool_destroy(hw->mb_mempool);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index a1ff75f1384f4e96e3c6e8456a78513659533135..dab0d3f9bee139934eba2d551ece7db13d8486fa 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2445,7 +2445,7 @@ csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
 
 		/* Allocate Dma buffers for Response Payload */
 		dma_buf = &ioreq->dma_buf;
-		dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
 						&dma_buf->paddr);
 		if (!dma_buf->vaddr) {
 			csio_err(hw,
@@ -2485,7 +2485,7 @@ csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
 		ioreq = (struct csio_ioreq *)tmp;
 
 		dma_buf = &ioreq->dma_buf;
-		pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
 			      dma_buf->paddr);
 
 		kfree(ioreq);
@@ -2516,7 +2516,7 @@ csio_scsim_exit(struct csio_scsim *scm)
 		ioreq = (struct csio_ioreq *)tmp;
 
 		dma_buf = &ioreq->dma_buf;
-		pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
 			      dma_buf->paddr);
 
 		kfree(ioreq);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 1a4cfa562a6087206ee2f68eaf3fc63988d1934a..512c8f1ea5b06efd4d42c2eed805be9e6a23d533 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -585,19 +585,21 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
 
 static struct rtable *find_route_ipv4(struct flowi4 *fl4,
 				      __be32 saddr, __be32 daddr,
-				      __be16 sport, __be16 dport, u8 tos)
+				      __be16 sport, __be16 dport, u8 tos,
+				      int ifindex)
 {
 	struct rtable *rt;
 
 	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
-				   dport, sport, IPPROTO_TCP, tos, 0);
+				   dport, sport, IPPROTO_TCP, tos, ifindex);
 	if (IS_ERR(rt))
 		return NULL;
 
 	return rt;
 }
 
-static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
+static struct cxgbi_sock *
+cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
 {
 	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
 	struct dst_entry *dst;
@@ -611,7 +613,8 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 	int port = 0xFFFF;
 	int err = 0;
 
-	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
+	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
+			     daddr->sin_port, 0, ifindex);
 	if (!rt) {
 		pr_info("no route to ipv4 0x%x, port %u.\n",
 			be32_to_cpu(daddr->sin_addr.s_addr),
@@ -693,11 +696,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 
 #if IS_ENABLED(CONFIG_IPV6)
 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
-					const struct in6_addr *daddr)
+					const struct in6_addr *daddr,
+					int ifindex)
 {
 	struct flowi6 fl;
 
 	memset(&fl, 0, sizeof(fl));
+	fl.flowi6_oif = ifindex;
 	if (saddr)
 		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
 	if (daddr)
@@ -705,7 +710,8 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
 }
 
-static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
+static struct cxgbi_sock *
+cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
 {
 	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
 	struct dst_entry *dst;
@@ -719,7 +725,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
 	int port = 0xFFFF;
 	int err = 0;
 
-	rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
+	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);
 
 	if (!rt) {
 		pr_info("no route to ipv6 %pI6 port %u\n",
@@ -2536,6 +2542,7 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
 	struct cxgbi_endpoint *cep;
 	struct cxgbi_hba *hba = NULL;
 	struct cxgbi_sock *csk;
+	int ifindex = 0;
 	int err = -EINVAL;
 
 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
@@ -2548,13 +2555,15 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
 			pr_info("shost 0x%p, priv NULL.\n", shost);
 			goto err_out;
 		}
+
+		ifindex = hba->ndev->ifindex;
 	}
 
 	if (dst_addr->sa_family == AF_INET) {
-		csk = cxgbi_check_route(dst_addr);
+		csk = cxgbi_check_route(dst_addr, ifindex);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if (dst_addr->sa_family == AF_INET6) {
-		csk = cxgbi_check_route6(dst_addr);
+		csk = cxgbi_check_route6(dst_addr, ifindex);
 #endif
 	} else {
 		pr_info("address family 0x%x NOT supported.\n",
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6a4367cc9caabea8f0bbc1519d309ab50b55578f..76b8b7eed0c03ba160275d0b7ac200d3a67c1ab7 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -820,8 +820,7 @@ static void term_afu(struct cxlflash_cfg *cfg)
 	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 		term_intr(cfg, UNMAP_THREE, k);
 
-	if (cfg->afu)
-		stop_afu(cfg);
+	stop_afu(cfg);
 
 	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 		term_mc(cfg, k);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ad0f9968ccfbffdbf8fa4d4e570a61dc896d04ff..ed46e8df2e42192bcc44fd6e7b22748caf84ede5 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1390,6 +1390,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	if (unlikely(!ctxi)) {
 		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
 			__func__, ctxid);
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -1650,6 +1651,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 	u64 ctxid = DECODE_CTXID(recover->context_id),
 	    rctxid = recover->context_id;
 	long reg;
+	bool locked = true;
 	int lretry = 20; /* up to 2 seconds */
 	int new_adap_fd = -1;
 	int rc = 0;
@@ -1658,8 +1660,11 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 	up_read(&cfg->ioctl_rwsem);
 	rc = mutex_lock_interruptible(mutex);
 	down_read(&cfg->ioctl_rwsem);
-	if (rc)
+	if (rc) {
+		locked = false;
 		goto out;
+	}
+
 	rc = check_state(cfg);
 	if (rc) {
 		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
@@ -1693,8 +1698,10 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 				mutex_unlock(mutex);
 				msleep(100);
 				rc = mutex_lock_interruptible(mutex);
-				if (rc)
+				if (rc) {
+					locked = false;
 					goto out;
+				}
 				goto retry_recover;
 			}
 
@@ -1738,7 +1745,8 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 out:
 	if (likely(ctxi))
 		put_context(ctxi);
-	mutex_unlock(mutex);
+	if (locked)
+		mutex_unlock(mutex);
 	atomic_dec_if_positive(&cfg->recovery_threads);
 	return rc;
 }
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index bdfb930614604f1aa6d698ae54097b92a6afa8f8..703bf1e9a64a1dd5db0736d8319d1d2229097e69 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -694,11 +694,7 @@ static int shrink_lxt(struct afu *afu,
 	/* Free LBAs allocated to freed chunks */
 	mutex_lock(&blka->mutex);
 	for (i = delta - 1; i >= 0; i--) {
-		/* Mask the higher 48 bits before shifting, even though
-		 * it is a noop
-		 */
-		aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
-		aun = (aun >> MC_CHUNK_SHIFT);
+		aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
 		if (needs_ws)
 			write_same16(sdev, aun, MC_CHUNK_SIZE);
 		ba_free(&blka->ba_lun, aun);
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6af3394d051dcebee8394e01a5cebe657f044968..003c3d72623842a7ba4b8f2b71dbb5c15fd68475 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -58,7 +58,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
 	.info			= NCR5380_info,
 	.queuecommand		= NCR5380_queue_command,
 	.eh_abort_handler	= NCR5380_abort,
-	.eh_bus_reset_handler	= NCR5380_bus_reset,
+	.eh_host_reset_handler	= NCR5380_host_reset,
 	.can_queue		= 32,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 256dd6791fcc8bb0196e359ba6bd509c234080e3..fd172b0890d33a153e546e1faebad7d71a2d4937 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1169,11 +1169,6 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
 	if(chan < 0 || chan >= MAX_CHANNEL)
 		return NULL;
 	
-	if( pHba->channel[chan].device == NULL){
-		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
-		return NULL;
-	}
-
 	d = pHba->channel[chan].device[id];
 	if(!d || d->tid == 0) {
 		return NULL;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 227dd2c2ec2f16d1cec7e7b2bb5894fa84862b5e..6501c330d8c84b6f977c8bd1b3364c885194093e 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1899,7 +1899,6 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
 static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 {
 	unsigned int i, time, k, c, limit = 0;
-	int arg_done = 0;
 	struct scsi_cmnd *SCpnt;
 	struct Scsi_Host *shost = SCarg->device->host;
 	struct hostdata *ha = (struct hostdata *)shost->hostdata;
@@ -1967,9 +1966,6 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 		if (SCpnt->scsi_done == NULL)
 			panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n",
 			      ha->board_name, i);
-
-		if (SCpnt == SCarg)
-			arg_done = 1;
 	}
 
 	if (do_dma(shost->io_port, 0, RESET_PIO)) {
@@ -2037,10 +2033,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
 	ha->in_reset = 0;
 	do_trace = 0;
 
-	if (arg_done)
-		printk("%s: reset, exit, done.\n", ha->board_name);
-	else
-		printk("%s: reset, exit.\n", ha->board_name);
+	printk("%s: reset, exit.\n", ha->board_name);
 
 	spin_unlock_irq(shost->host_lock);
 	return SUCCESS;
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f2e9d8aa979ca66c0f9226834b3d7ae8bd62769d..81f226be3e3b72067b87fe6d2553f91e7783b62f 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -309,7 +309,7 @@ MODULE_PARM_DESC(interrupt_mode,
 		 "Defines the interrupt mode to use.  0 for legacy"
 		 ", 1 for MSI.  Default is MSI (1).");
 
-static struct pci_device_id
+static const struct pci_device_id
 	esas2r_pci_table[] = {
 	{ ATTO_VENDOR_ID, 0x0049,	  ATTO_VENDOR_ID, 0x0049,
 	  0,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71cb05b1c3ebdd9d3d3631ee2da4a44a72b40731..c3fc34b9964df75a610dd70a74bd1ca23ee147b8 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -597,14 +597,12 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 
 		lp->non_tagged_cmd = ent;
 		return 0;
-	} else {
-		/* Tagged command, see if blocked by a
-		 * non-tagged one.
-		 */
-		if (lp->non_tagged_cmd || lp->hold)
-			return -EBUSY;
 	}
 
+	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
+	if (lp->non_tagged_cmd || lp->hold)
+		return -EBUSY;
+
 	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
 
 	lp->tagged_cmds[ent->orig_tag[1]] = ent;
@@ -1210,12 +1208,6 @@ static int esp_reconnect(struct esp *esp)
 
 	esp->active_cmd = ent;
 
-	if (ent->flags & ESP_CMD_FLAG_ABORT) {
-		esp->msg_out[0] = ABORT_TASK_SET;
-		esp->msg_out_len = 1;
-		scsi_esp_cmd(esp, ESP_CMD_SATN);
-	}
-
 	esp_event(esp, ESP_EVENT_CHECK_PHASE);
 	esp_restore_pointers(esp, ent);
 	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
@@ -1230,9 +1222,6 @@ static int esp_finish_select(struct esp *esp)
 {
 	struct esp_cmd_entry *ent;
 	struct scsi_cmnd *cmd;
-	u8 orig_select_state;
-
-	orig_select_state = esp->select_state;
 
 	/* No longer selecting.  */
 	esp->select_state = ESP_SELECT_NONE;
@@ -1496,9 +1485,8 @@ static void esp_msgin_reject(struct esp *esp)
 		return;
 	}
 
-	esp->msg_out[0] = ABORT_TASK_SET;
-	esp->msg_out_len = 1;
-	scsi_esp_cmd(esp, ESP_CMD_SATN);
+	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
+	esp_schedule_reset(esp);
 }
 
 static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
@@ -1621,7 +1609,7 @@ static void esp_msgin_extended(struct esp *esp)
 	shost_printk(KERN_INFO, esp->host,
 		     "Unexpected extended msg type %x\n", esp->msg_in[2]);
 
-	esp->msg_out[0] = ABORT_TASK_SET;
+	esp->msg_out[0] = MESSAGE_REJECT;
 	esp->msg_out_len = 1;
 	scsi_esp_cmd(esp, ESP_CMD_SATN);
 }
@@ -1745,7 +1733,6 @@ static int esp_process_event(struct esp *esp)
 			return 0;
 		}
 		goto again;
-		break;
 
 	case ESP_EVENT_DATA_IN:
 		write = 1;
@@ -1956,12 +1943,16 @@ static int esp_process_event(struct esp *esp)
 		} else {
 			if (esp->msg_out_len > 1)
 				esp->ops->dma_invalidate(esp);
-		}
 
-		if (!(esp->ireg & ESP_INTR_DC)) {
-			if (esp->rev != FASHME)
+			/* XXX if the chip went into disconnected mode,
+			 * we can't run the phase state machine anyway.
+			 */
+			if (!(esp->ireg & ESP_INTR_DC))
 				scsi_esp_cmd(esp, ESP_CMD_NULL);
 		}
+
+		esp->msg_out_len = 0;
+
 		esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		goto again;
 	case ESP_EVENT_MSGIN:
@@ -1998,6 +1989,10 @@ static int esp_process_event(struct esp *esp)
 
 			scsi_esp_cmd(esp, ESP_CMD_MOK);
 
+			/* Check whether a bus reset is to be done next */
+			if (esp->event == ESP_EVENT_RESET)
+				return 0;
+
 			if (esp->event != ESP_EVENT_FREE_BUS)
 				esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		} else {
@@ -2022,7 +2017,6 @@ static int esp_process_event(struct esp *esp)
 		}
 		esp_schedule_reset(esp);
 		return 0;
-		break;
 
 	case ESP_EVENT_RESET:
 		scsi_esp_cmd(esp, ESP_CMD_RS);
@@ -2033,7 +2027,6 @@ static int esp_process_event(struct esp *esp)
 			     "Unexpected event %x, resetting\n", esp->event);
 		esp_schedule_reset(esp);
 		return 0;
-		break;
 	}
 	return 1;
 }
@@ -2170,14 +2163,14 @@ static void __esp_interrupt(struct esp *esp)
 
 		esp_schedule_reset(esp);
 	} else {
-		if (!(esp->ireg & ESP_INTR_RSEL)) {
-			/* Some combination of FDONE, BSERV, DC.  */
-			if (esp->select_state != ESP_SELECT_NONE)
-				intr_done = esp_finish_select(esp);
-		} else if (esp->ireg & ESP_INTR_RSEL) {
+		if (esp->ireg & ESP_INTR_RSEL) {
 			if (esp->active_cmd)
 				(void) esp_finish_select(esp);
 			intr_done = esp_reconnect(esp);
+		} else {
+			/* Some combination of FDONE, BSERV, DC. */
+			if (esp->select_state != ESP_SELECT_NONE)
+				intr_done = esp_finish_select(esp);
 		}
 	}
 	while (!intr_done)
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 84dcbe4a6268bf740e02ac6d948890cbb2723ccb..7e8932ae91f8ab552cb7a8777a8f529185a143c5 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -281,7 +281,6 @@ struct esp_cmd_entry {
 
 	u8			flags;
 #define ESP_CMD_FLAG_WRITE	0x01 /* DMA is a write */
-#define ESP_CMD_FLAG_ABORT	0x02 /* being aborted */
 #define ESP_CMD_FLAG_AUTOSENSE	0x04 /* Doing automatic REQUEST_SENSE */
 #define ESP_CMD_FLAG_RESIDUAL	0x08 /* AM53c974 BLAST residual */
 
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 9cf3d56296ab87fca59da317412199afc354e5ba..5c8310bade619d63b1d239dc1878da7bf606f3fa 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -659,13 +659,13 @@ static void fcoe_fcf_device_release(struct device *dev)
 	kfree(fcf);
 }
 
-static struct device_type fcoe_ctlr_device_type = {
+static const struct device_type fcoe_ctlr_device_type = {
 	.name = "fcoe_ctlr",
 	.groups = fcoe_ctlr_attr_groups,
 	.release = fcoe_ctlr_device_release,
 };
 
-static struct device_type fcoe_fcf_device_type = {
+static const struct device_type fcoe_fcf_device_type = {
 	.name = "fcoe_fcf",
 	.groups = fcoe_fcf_attr_groups,
 	.release = fcoe_fcf_device_release,
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index b87ab38a4530118bbebb4d7e2aa3dbb3f689223f..ebbe5a3e665dae1e2176c027c367d52ddc1d2cdd 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -933,7 +933,7 @@ struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt )
       }
    }
 
-   fdomain_16x0_bus_reset(NULL);
+   fdomain_16x0_host_reset(NULL);
 
    if (fdomain_test_loopback()) {
       printk(KERN_ERR  "scsi: <fdomain> Detection failed (loopback test failed at port base 0x%x)\n", port_base);
@@ -1568,7 +1568,7 @@ static int fdomain_16x0_abort(struct scsi_cmnd *SCpnt)
    return SUCCESS;
 }
 
-int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt)
+int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt)
 {
    unsigned long flags;
 
@@ -1758,7 +1758,7 @@ struct scsi_host_template fdomain_driver_template = {
 	.info			= fdomain_16x0_info,
 	.queuecommand		= fdomain_16x0_queue,
 	.eh_abort_handler	= fdomain_16x0_abort,
-	.eh_bus_reset_handler	= fdomain_16x0_bus_reset,
+	.eh_host_reset_handler	= fdomain_16x0_host_reset,
 	.bios_param		= fdomain_16x0_biosparam,
 	.release		= fdomain_16x0_release,
 	.can_queue		= 1,
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
index 47021d9d4fe4c01a511820159237cc8cb9722a17..5cbe86b573aee36b992262c5d2dee1f5ba74294d 100644
--- a/drivers/scsi/fdomain.h
+++ b/drivers/scsi/fdomain.h
@@ -21,4 +21,4 @@
 extern struct scsi_host_template fdomain_driver_template;
 extern int fdomain_setup(char *str);
 extern struct Scsi_Host *__fdomain_16x0_detect(struct  scsi_host_template *tpnt );
-extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt);
+extern int fdomain_16x0_host_reset(struct scsi_cmnd *SCpnt);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 67aab965c0f4ff59b247e8e6d88e0a0d08674efe..d094ba59ed156b5a5d8fad3775bba9dc5451327b 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -180,7 +180,7 @@ enum fnic_msix_intr_index {
 
 struct fnic_msix_entry {
 	int requested;
-	char devname[IFNAMSIZ];
+	char devname[IFNAMSIZ + 11];
 	irqreturn_t (*isr)(int, void *);
 	void *devid;
 };
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 6c0646d62dfb3344c09b7b4d32b1f922557266d3..242e2ee494a1cafc2987a133534445da90f71cf8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1990,10 +1990,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			"Issuing Host reset due to out of order IO\n");
 
-		if (fnic_host_reset(sc) == FAILED) {
-			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-				"fnic_host_reset failed.\n");
-		}
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index c34fc91ba48668c09970c5a83a6bd38fa90ccd12..fc538181f8df16a2839835d442257a66875f6f82 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -1,17 +1,17 @@
 /*
  * Generic Generic NCR5380 driver
- *	
+ *
  * Copyright 1993, Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 440-4894
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
  *
  * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
- *    K.Lentin@cs.monash.edu.au
+ * K.Lentin@cs.monash.edu.au
  *
  * NCR53C400A extensions (c) 1996, Ingmar Baumgart
- *    ingmar@gonzo.schwaben.de
+ * ingmar@gonzo.schwaben.de
  *
  * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg
  * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl
@@ -44,17 +44,19 @@
 	int c400_ctl_status; \
 	int c400_blk_cnt; \
 	int c400_host_buf; \
-	int io_width
+	int io_width; \
+	int pdma_residual; \
+	int board
 
 #define NCR5380_dma_xfer_len            generic_NCR5380_dma_xfer_len
-#define NCR5380_dma_recv_setup          generic_NCR5380_pread
-#define NCR5380_dma_send_setup          generic_NCR5380_pwrite
-#define NCR5380_dma_residual            NCR5380_dma_residual_none
+#define NCR5380_dma_recv_setup          generic_NCR5380_precv
+#define NCR5380_dma_send_setup          generic_NCR5380_psend
+#define NCR5380_dma_residual            generic_NCR5380_dma_residual
 
 #define NCR5380_intr                    generic_NCR5380_intr
 #define NCR5380_queue_command           generic_NCR5380_queue_command
 #define NCR5380_abort                   generic_NCR5380_abort
-#define NCR5380_bus_reset               generic_NCR5380_bus_reset
+#define NCR5380_host_reset              generic_NCR5380_host_reset
 #define NCR5380_info                    generic_NCR5380_info
 
 #define NCR5380_io_delay(x)             udelay(x)
@@ -76,6 +78,7 @@
 #define IRQ_AUTO 254
 
 #define MAX_CARDS 8
+#define DMA_MAX_SIZE 32768
 
 /* old-style parameters for compatibility */
 static int ncr_irq = -1;
@@ -314,6 +317,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 	}
 	hostdata = shost_priv(instance);
 
+	hostdata->board = board;
 	hostdata->io = iomem;
 	hostdata->region_size = region_size;
 
@@ -478,180 +482,210 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 		release_mem_region(base, region_size);
 }
 
+/* wait_for_53c80_access - wait for 53C80 registers to become accessible
+ * @hostdata: scsi host private data
+ *
+ * The registers within the 53C80 logic block are inaccessible until
+ * bit 7 in the 53C400 control status register gets asserted.
+ */
+
+static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata)
+{
+	int count = 10000;
+
+	do {
+		if (hostdata->board == BOARD_DTC3181E)
+			udelay(4); /* DTC436 chip hangs without this */
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)
+			return;
+	} while (--count > 0);
+
+	scmd_printk(KERN_ERR, hostdata->connected,
+	            "53c80 registers not accessible, device will be reset\n");
+	NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+}
+
 /**
- *	generic_NCR5380_pread - pseudo DMA read
- *	@hostdata: scsi host private data
- *	@dst: buffer to read into
- *	@len: buffer length
+ * generic_NCR5380_precv - pseudo DMA receive
+ * @hostdata: scsi host private data
+ * @dst: buffer to write into
+ * @len: transfer size
  *
- *	Perform a pseudo DMA mode read from an NCR53C400 or equivalent
- *	controller
+ * Perform a pseudo DMA mode receive from a 53C400 or equivalent device.
  */
- 
-static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
+
+static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata,
                                         unsigned char *dst, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
-			break;
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
-			printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
-			return -1;
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (start == len - 128) {
+			/* Ignore End of DMA interrupt for the final buffer */
+			if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status,
+			                          CSR_HOST_BUF_NOT_RDY, 0, HZ / 64) < 0)
+				break;
+		} else {
+			if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+			                           CSR_HOST_BUF_NOT_RDY, 0,
+			                           hostdata->c400_ctl_status,
+			                           CSR_GATED_53C80_IRQ,
+			                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+			    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
+				break;
 		}
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			insw(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 64);
+			     dst + start, 64);
 		else if (hostdata->io_port)
 			insb(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 128);
+			     dst + start, 128);
 		else
 			memcpy_fromio(dst + start,
 				hostdata->io + NCR53C400_host_buffer, 128);
-
 		start += 128;
-		blocks--;
-	}
+	} while (start < len);
 
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; /* FIXME - no timeout */
+	residual = len - start;
 
-		if (hostdata->io_port && hostdata->io_width == 2)
-			insw(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 64);
-		else if (hostdata->io_port)
-			insb(hostdata->io_port + hostdata->c400_host_buf,
-							dst + start, 128);
-		else
-			memcpy_fromio(dst + start,
-				hostdata->io + NCR53C400_host_buffer, 128);
-
-		start += 128;
-		blocks--;
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
 	}
+	wait_for_53c80_access(hostdata);
 
-	if (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ))
-		printk("53C400r: no 53C80 gated irq after transfer");
+	if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           BASR_END_DMA_TRANSFER,
+	                                           HZ / 64) < 0)
+		scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+		            __func__);
 
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG))
-		;
+	hostdata->pdma_residual = residual;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
-		printk(KERN_ERR "53C400r: no end dma signal\n");
-		
 	return 0;
 }
 
 /**
- *	generic_NCR5380_pwrite - pseudo DMA write
- *	@hostdata: scsi host private data
- *	@dst: buffer to read into
- *	@len: buffer length
+ * generic_NCR5380_psend - pseudo DMA send
+ * @hostdata: scsi host private data
+ * @src: buffer to read from
+ * @len: transfer size
  *
- *	Perform a pseudo DMA mode read from an NCR53C400 or equivalent
- *	controller
+ * Perform a pseudo DMA mode send to a 53C400 or equivalent device.
  */
 
-static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
-                                         unsigned char *src, int len)
+static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
+                                        unsigned char *src, int len)
 {
-	int blocks = len / 128;
+	int residual;
 	int start = 0;
 
 	NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
-	NCR5380_write(hostdata->c400_blk_cnt, blocks);
-	while (1) {
-		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
-			printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
-			return -1;
+	NCR5380_write(hostdata->c400_blk_cnt, len / 128);
+
+	do {
+		if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status,
+		                           CSR_HOST_BUF_NOT_RDY, 0,
+		                           hostdata->c400_ctl_status,
+		                           CSR_GATED_53C80_IRQ,
+		                           CSR_GATED_53C80_IRQ, HZ / 64) < 0 ||
+		    NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) {
+			/* Both 128 B buffers are in use */
+			if (start >= 128)
+				start -= 128;
+			if (start >= 128)
+				start -= 128;
+			break;
 		}
 
-		if (NCR5380_read(hostdata->c400_blk_cnt) == 0)
+		if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0)
 			break;
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - timeout
 
-		if (hostdata->io_port && hostdata->io_width == 2)
-			outsw(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 64);
-		else if (hostdata->io_port)
-			outsb(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 128);
-		else
-			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
-			            src + start, 128);
+		if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) {
+			/* Host buffer is empty, other one is in use */
+			if (start >= 128)
+				start -= 128;
+			break;
+		}
 
-		start += 128;
-		blocks--;
-	}
-	if (blocks) {
-		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
-			; // FIXME - no timeout
+		if (start >= len)
+			continue;
 
 		if (hostdata->io_port && hostdata->io_width == 2)
 			outsw(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 64);
+			      src + start, 64);
 		else if (hostdata->io_port)
 			outsb(hostdata->io_port + hostdata->c400_host_buf,
-							src + start, 128);
+			      src + start, 128);
 		else
 			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
 			            src + start, 128);
-
 		start += 128;
-		blocks--;
-	}
+	} while (1);
 
-	/* wait for 53C80 registers to be available */
-	while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) {
-		udelay(4); /* DTC436 chip hangs without this */
-		/* FIXME - no timeout */
-	}
+	residual = len - start;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
-		printk(KERN_ERR "53C400w: no end dma signal\n");
+	if (residual != 0) {
+		/* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */
+		NCR5380_write(hostdata->c400_ctl_status, CSR_RESET);
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
 	}
+	wait_for_53c80_access(hostdata);
+
+	if (residual == 0) {
+		if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+		                          TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected,
+			            "%s: Last Byte Sent timeout\n", __func__);
+
+		if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+		                          BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER,
+		                          HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n",
+			            __func__);
+	}
+
+	hostdata->pdma_residual = residual;
 
-	while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
-		; 	// TIMEOUT
 	return 0;
 }
 
 static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                         struct scsi_cmnd *cmd)
 {
-	int transfersize = cmd->transfersize;
+	int transfersize = cmd->SCp.this_residual;
 
 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
 		return 0;
 
-	/* Limit transfers to 32K, for xx400 & xx406
-	 * pseudoDMA that transfers in 128 bytes blocks.
-	 */
-	if (transfersize > 32 * 1024 && cmd->SCp.this_residual &&
-	    !(cmd->SCp.this_residual % transfersize))
-		transfersize = 32 * 1024;
-
 	/* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */
 	if (transfersize % 128)
-		transfersize = 0;
+		return 0;
+
+	/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
+	if (hostdata->board == BOARD_DTC3181E &&
+	    cmd->sc_data_direction == DMA_TO_DEVICE)
+		transfersize = min(cmd->SCp.this_residual, 512);
 
-	return transfersize;
+	return min(transfersize, DMA_MAX_SIZE);
 }
 
-/*
- *	Include the NCR5380 core code that we build our driver around	
- */
- 
+static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+	return hostdata->pdma_residual;
+}
+
+/* Include the core driver code. */
+
 #include "NCR5380.c"
 
 static struct scsi_host_template driver_template = {
@@ -661,7 +695,7 @@ static struct scsi_host_template driver_template = {
 	.info			= generic_NCR5380_info,
 	.queuecommand		= generic_NCR5380_queue_command,
 	.eh_abort_handler	= generic_NCR5380_abort,
-	.eh_bus_reset_handler	= generic_NCR5380_bus_reset,
+	.eh_host_reset_handler	= generic_NCR5380_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
@@ -671,11 +705,10 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 128,
 };
 
-
 static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
 {
 	int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
-					  irq[ndev], card[ndev]);
+	                                   irq[ndev], card[ndev]);
 	if (ret) {
 		if (base[ndev])
 			printk(KERN_WARNING "Card not found at address 0x%03x\n",
@@ -687,7 +720,7 @@ static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
 }
 
 static int generic_NCR5380_isa_remove(struct device *pdev,
-				   unsigned int ndev)
+                                      unsigned int ndev)
 {
 	generic_NCR5380_release_resources(dev_get_drvdata(pdev));
 	dev_set_drvdata(pdev, NULL);
@@ -703,14 +736,14 @@ static struct isa_driver generic_NCR5380_isa_driver = {
 };
 
 #ifdef CONFIG_PNP
-static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+static const struct pnp_device_id generic_NCR5380_pnp_ids[] = {
 	{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
 	{ .id = "" }
 };
 MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
 
 static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
-			       const struct pnp_device_id *id)
+                                     const struct pnp_device_id *id)
 {
 	int base, irq;
 
@@ -721,7 +754,7 @@ static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
 	irq = pnp_irq(pdev, 0);
 
 	return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
-				       id->driver_data);
+	                                id->driver_data);
 }
 
 static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index facc7271f9326549c6009a6da6d33f69e2ecab87..a4473356a9dc2a4fcfd1f4d34d3096ba50797b5f 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2354,7 +2354,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         inq.resp_aenc = 2;
         inq.add_length= 32;
         strcpy(inq.vendor,ha->oem_name);
-        sprintf(inq.product,"Host Drive  #%02d",t);
+        snprintf(inq.product, sizeof(inq.product), "Host Drive  #%02d",t);
         strcpy(inq.revision,"   ");
         gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
         break;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index be609db668075410bf5661fd2837bc13a8b64aa1..d08b2716752c9d44cb50e7ba2b63e0541e09c939 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -147,7 +147,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
 
     gdth_cmd_str *gdtcmd;
     gdth_evt_str *estr;
-    char hrec[161];
+    char hrec[277];
 
     char *buf;
     gdth_dskstat_str *pds;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 3b6f83ffddc4e87c59057d0f7eb8a4ded51f11f4..a27fc49ebd3acd5ed7063e8254d6736fca5ca4e6 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -171,23 +171,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	}
 }
 
-static int gvp11_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: shouldn't we no-op this function (return
-	   FAILED), and fall back to host reset function,
-	   wd33c93_host_reset ? */
-
-	spin_lock_irq(instance->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(instance->host_lock);
-
-	return SUCCESS;
-}
-
 static struct scsi_host_template gvp11_scsi_template = {
 	.module			= THIS_MODULE,
 	.name			= "GVP Series II SCSI",
@@ -196,7 +179,6 @@ static struct scsi_host_template gvp11_scsi_template = {
 	.proc_name		= "GVP11",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= gvp11_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a722f2bd72abf0686453947599ce8f6fd6daf6bf..07f4a4cfbec1b3275280eb0193009fbf3e72b264 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -15,6 +15,7 @@
 #include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/dmapool.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -25,14 +26,13 @@
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.6"
-
 #define HISI_SAS_MAX_PHYS	9
 #define HISI_SAS_MAX_QUEUES	32
 #define HISI_SAS_QUEUE_SLOTS 512
 #define HISI_SAS_MAX_ITCT_ENTRIES 2048
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
+#define HISI_SAS_REJECT_CMD_BIT	1
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -90,6 +90,14 @@ enum hisi_sas_dev_type {
 	HISI_SAS_DEV_TYPE_SATA,
 };
 
+struct hisi_sas_hw_error {
+	u32 irq_msk;
+	u32 msk;
+	int shift;
+	const char *msg;
+	int reg;
+};
+
 struct hisi_sas_phy {
 	struct hisi_hba	*hisi_hba;
 	struct hisi_sas_port	*port;
@@ -132,6 +140,7 @@ struct hisi_sas_dq {
 struct hisi_sas_device {
 	struct hisi_hba		*hisi_hba;
 	struct domain_device	*sas_device;
+	struct completion *completion;
 	struct hisi_sas_dq	*dq;
 	struct list_head	list;
 	u64 attached_phy;
@@ -192,6 +201,7 @@ struct hisi_sas_hw {
 	void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+	void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
 			struct sas_phy_linkrates *linkrates);
 	enum sas_linkrate (*phy_get_max_linkrate)(void);
@@ -201,6 +211,7 @@ struct hisi_sas_hw {
 	void (*dereg_device)(struct hisi_hba *hisi_hba,
 				struct domain_device *device);
 	int (*soft_reset)(struct hisi_hba *hisi_hba);
+	u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
 	int max_command_entries;
 	int complete_hdr_size;
 };
@@ -390,6 +401,7 @@ struct hisi_sas_slot_buf_table {
 extern struct scsi_transport_template *hisi_sas_stt;
 extern struct scsi_host_template *hisi_sas_sht;
 
+extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
 extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
 extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
 extern void hisi_sas_free(struct hisi_hba *hisi_hba);
@@ -408,6 +420,4 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
 				    struct sas_task *task,
 				    struct hisi_sas_slot *slot);
 extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
-extern void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-				     u32 state);
 #endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 4022c3f8295f7e04161f30884763b99dda4dcc56..16664f2e15fbadd77d30439d723f1d972d0efbbf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -61,6 +61,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_WRITE_QUEUED:
 	case ATA_CMD_WRITE_LOG_DMA_EXT:
 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
+	case ATA_CMD_ZAC_MGMT_IN:
 	return HISI_SAS_SATA_PROTOCOL_DMA;
 
 	case ATA_CMD_CHK_POWER:
@@ -73,6 +74,7 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_SET_FEATURES:
 	case ATA_CMD_STANDBY:
 	case ATA_CMD_STANDBYNOW1:
+	case ATA_CMD_ZAC_MGMT_OUT:
 	return HISI_SAS_SATA_PROTOCOL_NONDATA;
 	default:
 		if (direction == DMA_NONE)
@@ -125,6 +127,15 @@ struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
 }
 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
 
+void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
+{
+	int phy_no;
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
+		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
+
 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 {
 	void *bitmap = hisi_hba->slot_index_tags;
@@ -433,7 +444,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_sas_dq *dq = sas_dev->dq;
 
-	if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
 		return -EINVAL;
 
 	/* protect task_prep and start_delivery sequence */
@@ -716,7 +727,6 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
-	int dev_id = sas_dev->device_id;
 
 	dev_info(dev, "found dev[%d:%x] is gone\n",
 		 sas_dev->device_id, sas_dev->dev_type);
@@ -729,9 +739,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	hisi_hba->hw->free_device(hisi_hba, sas_dev);
 	device->lldd_dev = NULL;
 	memset(sas_dev, 0, sizeof(*sas_dev));
-	sas_dev->device_id = dev_id;
 	sas_dev->dev_type = SAS_PHY_UNUSED;
-	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 }
 
 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
@@ -764,7 +772,12 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 	case PHY_FUNC_SET_LINK_RATE:
 		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
 		break;
-
+	case PHY_FUNC_GET_EVENTS:
+		if (hisi_hba->hw->get_events) {
+			hisi_hba->hw->get_events(hisi_hba, phy_no);
+			break;
+		}
+		/* fallthru */
 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
 	default:
 		return -EOPNOTSUPP;
@@ -967,37 +980,117 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
 				sizeof(ssp_task), tmf);
 }
 
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
+		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+{
+	struct hisi_sas_device	*sas_dev;
+	struct domain_device *device;
+	int i;
+
+	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+		sas_dev = &hisi_hba->devices[i];
+		device = sas_dev->sas_device;
+		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
+				|| !device || (device->port != sas_port))
+			continue;
+
+		hisi_hba->hw->free_device(hisi_hba, sas_dev);
+
+		/* Update linkrate of directly attached device. */
+		if (!device->parent)
+			device->linkrate = linkrate;
+
+		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+	}
+}
+
+static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
+			      u32 state)
+{
+	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+	struct asd_sas_port *_sas_port = NULL;
+	int phy_no;
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+		struct asd_sas_port *sas_port = sas_phy->port;
+		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+		bool do_port_check = !!(_sas_port != sas_port);
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		/* Report PHY state change to libsas */
+		if (state & (1 << phy_no)) {
+			if (do_port_check && sas_port) {
+				struct domain_device *dev = sas_port->port_dev;
+
+				_sas_port = sas_port;
+				port->id = phy->port_id;
+				hisi_sas_refresh_port_id(hisi_hba,
+						sas_port, sas_phy->linkrate);
+
+				if (DEV_IS_EXPANDER(dev->dev_type))
+					sas_ha->notify_port_event(sas_phy,
+							PORTE_BROADCAST_RCVD);
+			}
+		} else if (old_state & (1 << phy_no))
+			/* PHY down but was up before */
+			hisi_sas_phy_down(hisi_hba, phy_no, 0);
+
+	}
+
+	drain_workqueue(hisi_hba->shost->work_q);
+}
+
 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
 {
+	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+	struct device *dev = hisi_hba->dev;
+	struct Scsi_Host *shost = hisi_hba->shost;
+	u32 old_state, state;
+	unsigned long flags;
 	int rc;
 
 	if (!hisi_hba->hw->soft_reset)
 		return -1;
 
-	if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
-		struct device *dev = hisi_hba->dev;
-		struct sas_ha_struct *sas_ha = &hisi_hba->sha;
-		unsigned long flags;
+	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+		return -1;
 
-		dev_dbg(dev, "controller reset begins!\n");
-		scsi_block_requests(hisi_hba->shost);
-		rc = hisi_hba->hw->soft_reset(hisi_hba);
-		if (rc) {
-			dev_warn(dev, "controller reset failed (%d)\n", rc);
-			goto out;
-		}
-		spin_lock_irqsave(&hisi_hba->lock, flags);
-		hisi_sas_release_tasks(hisi_hba);
-		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	dev_dbg(dev, "controller resetting...\n");
+	old_state = hisi_hba->hw->get_phys_state(hisi_hba);
 
-		sas_ha->notify_ha_event(sas_ha, HAE_RESET);
-		dev_dbg(dev, "controller reset successful!\n");
-	} else
-		return -1;
+	scsi_block_requests(shost);
+	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+	rc = hisi_hba->hw->soft_reset(hisi_hba);
+	if (rc) {
+		dev_warn(dev, "controller reset failed (%d)\n", rc);
+		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+		goto out;
+	}
+	spin_lock_irqsave(&hisi_hba->lock, flags);
+	hisi_sas_release_tasks(hisi_hba);
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
+	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+	/* Init and wait for PHYs to come up and for all libsas events to finish. */
+	hisi_hba->hw->phys_init(hisi_hba);
+	msleep(1000);
+	drain_workqueue(hisi_hba->wq);
+	drain_workqueue(shost->work_q);
+
+	state = hisi_hba->hw->get_phys_state(hisi_hba);
+	hisi_sas_rescan_topology(hisi_hba, old_state, state);
+	dev_dbg(dev, "controller reset complete\n");
 
 out:
-	scsi_unblock_requests(hisi_hba->shost);
+	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
 	return rc;
 }
 
@@ -1241,7 +1334,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
 	unsigned long flags, flags_dq;
 
-	if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
 		return -EINVAL;
 
 	if (!device->port)
@@ -1279,12 +1372,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	slot->port = port;
 	task->lldd_task = slot;
 
+	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+			GFP_ATOMIC, &slot->buf_dma);
+	if (!slot->buf) {
+		rc = -ENOMEM;
+		goto err_out_tag;
+	}
+
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
+	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
 
 	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
 				      abort_flag, task_tag);
 	if (rc)
-		goto err_out_tag;
+		goto err_out_buf;
 
 
 	list_add_tail(&slot->entry, &sas_dev->list);
@@ -1302,6 +1404,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 
 	return 0;
 
+err_out_buf:
+	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
+		slot->buf_dma);
 err_out_tag:
 	spin_lock_irqsave(&hisi_hba->lock, flags);
 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -1437,36 +1542,6 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
 
-void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-			      u32 state)
-{
-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
-	int phy_no;
-
-	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-		struct asd_sas_phy *sas_phy = &phy->sas_phy;
-		struct asd_sas_port *sas_port = sas_phy->port;
-		struct domain_device *dev;
-
-		if (sas_phy->enabled) {
-			/* Report PHY state change to libsas */
-			if (state & (1 << phy_no))
-				continue;
-
-			if (old_state & (1 << phy_no))
-				/* PHY down but was up before */
-				hisi_sas_phy_down(hisi_hba, phy_no, 0);
-		}
-		if (!sas_port)
-			continue;
-		dev = sas_port->port_dev;
-
-		if (DEV_IS_EXPANDER(dev->dev_type))
-			sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD);
-	}
-}
-EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
 
 struct scsi_transport_template *hisi_sas_stt;
 EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -1487,7 +1562,7 @@ static struct scsi_host_template _hisi_sas_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 };
@@ -1825,7 +1900,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 
 	return shost;
 err_out:
-	kfree(shost);
+	scsi_host_put(shost);
 	dev_err(dev, "shost alloc failed\n");
 	return NULL;
 }
@@ -1916,7 +1991,7 @@ int hisi_sas_probe(struct platform_device *pdev,
 	scsi_remove_host(shost);
 err_out_ha:
 	hisi_sas_free(hisi_hba);
-	kfree(shost);
+	scsi_host_put(shost);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_probe);
@@ -1931,15 +2006,13 @@ int hisi_sas_remove(struct platform_device *pdev)
 	sas_remove_host(sha->core.shost);
 
 	hisi_sas_free(hisi_hba);
-	kfree(shost);
+	scsi_host_put(shost);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_remove);
 
 static __init int hisi_sas_init(void)
 {
-	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
-
 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
 	if (!hisi_sas_stt)
 		return -ENOMEM;
@@ -1955,7 +2028,6 @@ static __exit void hisi_sas_exit(void)
 module_init(hisi_sas_init);
 module_exit(hisi_sas_exit);
 
-MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
 MODULE_DESCRIPTION("HISILICON SAS controller driver");
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 2bfea7082e3a070f0e6e44f27837df86b8f75c89..779af979b6db93380386210c4a19f6231ecf8fc1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -256,6 +256,8 @@
 #define LINK_DFX2_RCVR_HOLD_STS_MSK	(0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF)
 #define LINK_DFX2_SEND_HOLD_STS_OFF	10
 #define LINK_DFX2_SEND_HOLD_STS_MSK	(0x1 << LINK_DFX2_SEND_HOLD_STS_OFF)
+#define SAS_ERR_CNT4_REG		(PORT_BASE + 0x290)
+#define SAS_ERR_CNT6_REG		(PORT_BASE + 0x298)
 #define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
 #define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
 #define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
@@ -399,6 +401,172 @@ struct hisi_sas_err_record_v2 {
 	__le32 dma_rx_err_type;
 };
 
+static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
+		.msk = HGC_DQE_ECC_1B_ADDR_MSK,
+		.shift = HGC_DQE_ECC_1B_ADDR_OFF,
+		.msg = "hgc_dqe_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_DQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
+		.msk = HGC_IOST_ECC_1B_ADDR_MSK,
+		.shift = HGC_IOST_ECC_1B_ADDR_OFF,
+		.msg = "hgc_iost_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_IOST_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
+		.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
+		.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
+		.msg = "hgc_itct_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_ITCT_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
+		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+		.msg = "hgc_iostl_acc1b_intr found:  \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
+		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+		.msg = "hgc_itctl_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
+		.msk = HGC_CQE_ECC_1B_ADDR_MSK,
+		.shift = HGC_CQE_ECC_1B_ADDR_OFF,
+		.msg = "hgc_cqe_acc1b_intr found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_CQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+		.msg = "rxm_mem0_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+		.msg = "rxm_mem1_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+		.msg = "rxm_mem2_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
+		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+		.msg = "rxm_mem3_acc1b_intr found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS15,
+	},
+};
+
+static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
+		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_dqe_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_DQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
+		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
+		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
+		.msg = "hgc_iost_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_IOST_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
+		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
+		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
+		.msg = "hgc_itct_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_ITCT_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+		.msg = "hgc_iostl_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+		.msg = "hgc_itctl_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
+		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_cqe_accbad_intr (0x%x) found: \
+				Ram address is 0x%08X\n",
+		.reg = HGC_CQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+		.msg = "rxm_mem0_accbad_intr (0x%x) found: \
+			memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+		.msg = "rxm_mem1_accbad_intr (0x%x) found: \
+			memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+		.msg = "rxm_mem2_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+		.msg = "rxm_mem3_accbad_intr (0x%x) found: \
+				memory address is 0x%08X\n",
+		.reg = HGC_RXM_DFX_STATUS15,
+	},
+};
+
 enum {
 	HISI_SAS_PHY_PHY_UPDOWN,
 	HISI_SAS_PHY_CHNL_INT,
@@ -806,12 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 static void free_device_v2_hw(struct hisi_hba *hisi_hba,
 			      struct hisi_sas_device *sas_dev)
 {
+	DECLARE_COMPLETION_ONSTACK(completion);
 	u64 dev_id = sas_dev->device_id;
-	struct device *dev = hisi_hba->dev;
 	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
 	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
 	int i;
 
+	sas_dev->completion = &completion;
+
 	/* SoC bug workaround */
 	if (dev_is_sata(sas_dev->sas_device))
 		clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
@@ -821,28 +991,12 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
 
-	/* clear the itct int*/
 	for (i = 0; i < 2; i++) {
-		/* clear the itct table*/
-		reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
-		reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+		reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+		wait_for_completion(sas_dev->completion);
 
-		udelay(10);
-		reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
-		if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
-			dev_dbg(dev, "got clear ITCT done interrupt\n");
-
-			/* invalid the itct state*/
-			memset(itct, 0, sizeof(struct hisi_sas_itct));
-			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
-					 ENT_INT_SRC3_ITC_INT_MSK);
-
-			/* clear the itct */
-			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
-			dev_dbg(dev, "clear ITCT ok\n");
-			break;
-		}
+		memset(itct, 0, sizeof(struct hisi_sas_itct));
 	}
 }
 
@@ -1023,7 +1177,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
 	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
 	for (i = 0; i < hisi_hba->queue_count; i++)
 		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
@@ -1332,25 +1486,12 @@ static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	enable_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	disable_phy_v2_hw(hisi_hba, phy_no);
-}
-
-static void stop_phys_v2_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		stop_phy_v2_hw(hisi_hba, i);
-}
-
 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	stop_phy_v2_hw(hisi_hba, phy_no);
+	disable_phy_v2_hw(hisi_hba, phy_no);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -1360,17 +1501,38 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	start_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
+static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
-	int i;
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_phy *sphy = sas_phy->phy;
+	u32 err4_reg_val, err6_reg_val;
 
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		start_phy_v2_hw(hisi_hba, i);
+	/* loss of dword sync, phy reset problem */
+	err4_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT4_REG);
+
+	/* disparity err, invalid dword */
+	err6_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT6_REG);
+
+	sphy->loss_of_dword_sync_count += (err4_reg_val >> 16) & 0xFFFF;
+	sphy->phy_reset_problem_count += err4_reg_val & 0xFFFF;
+	sphy->invalid_dword_count += (err6_reg_val & 0xFF0000) >> 16;
+	sphy->running_disparity_error_count += err6_reg_val & 0xFF;
 }
 
 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
 {
-	start_phys_v2_hw(hisi_hba);
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		start_phy_v2_hw(hisi_hba, i);
+	}
 }
 
 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1965,7 +2127,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
 		}
 		case DMA_RX_DATA_LEN_UNDERFLOW:
 		{
-			ts->residual = dma_rx_err_type;
+			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
 			break;
 		}
@@ -2091,7 +2253,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
 		}
 		case DMA_RX_DATA_LEN_UNDERFLOW:
 		{
-			ts->residual = dma_rx_err_type;
+			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
 			break;
 		}
@@ -2599,6 +2761,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 	struct hisi_hba *hisi_hba = p;
 	u32 irq_msk;
 	int phy_no = 0;
+	irqreturn_t res = IRQ_NONE;
 
 	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
 		   >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
@@ -2613,15 +2776,15 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 			case CHL_INT0_SL_PHY_ENABLE_MSK:
 				/* phy up */
 				if (phy_up_v2_hw(phy_no, hisi_hba) ==
-				    IRQ_NONE)
-					return IRQ_NONE;
+				    IRQ_HANDLED)
+					res = IRQ_HANDLED;
 				break;
 
 			case CHL_INT0_NOT_RDY_MSK:
 				/* phy down */
 				if (phy_down_v2_hw(phy_no, hisi_hba) ==
-				    IRQ_NONE)
-					return IRQ_NONE;
+				    IRQ_HANDLED)
+					res = IRQ_HANDLED;
 				break;
 
 			case (CHL_INT0_NOT_RDY_MSK |
@@ -2631,13 +2794,13 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 				if (reg_value & BIT(phy_no)) {
 					/* phy up */
 					if (phy_up_v2_hw(phy_no, hisi_hba) ==
-					    IRQ_NONE)
-						return IRQ_NONE;
+					    IRQ_HANDLED)
+						res = IRQ_HANDLED;
 				} else {
 					/* phy down */
 					if (phy_down_v2_hw(phy_no, hisi_hba) ==
-					    IRQ_NONE)
-						return IRQ_NONE;
+					    IRQ_HANDLED)
+						res = IRQ_HANDLED;
 				}
 				break;
 
@@ -2650,7 +2813,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
 		phy_no++;
 	}
 
-	return IRQ_HANDLED;
+	return res;
 }
 
 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2733,194 +2896,38 @@ static void
 one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
 {
 	struct device *dev = hisi_hba->dev;
-	u32 reg_val;
-
-	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
-		dev_warn(dev, "hgc_dqe_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
-			 HGC_DQE_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
-		dev_warn(dev, "hgc_iost_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
-			 HGC_IOST_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
-		dev_warn(dev, "hgc_itct_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
-			 HGC_ITCT_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_iostl_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
-			 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_itctl_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
-			 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
-		dev_warn(dev, "hgc_cqe_acc1b_intr found: \
-			       Ram address is 0x%08X\n",
-			 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
-			 HGC_CQE_ECC_1B_ADDR_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem0_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM0_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem1_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM1_OFF);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem2_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
-			 HGC_RXM_DFX_STATUS14_MEM2_OFF);
-	}
+	const struct hisi_sas_hw_error *ecc_error;
+	u32 val;
+	int i;
 
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
-		dev_warn(dev, "rxm_mem3_acc1b_intr found: \
-			       memory address is 0x%08X\n",
-			 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
-			 HGC_RXM_DFX_STATUS15_MEM3_OFF);
+	for (i = 0; i < ARRAY_SIZE(one_bit_ecc_errors); i++) {
+		ecc_error = &one_bit_ecc_errors[i];
+		if (irq_value & ecc_error->irq_msk) {
+			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+			val &= ecc_error->msk;
+			val >>= ecc_error->shift;
+			dev_warn(dev, ecc_error->msg, val);
+		}
 	}
-
 }
 
 static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
 		u32 irq_value)
 {
-	u32 reg_val;
 	struct device *dev = hisi_hba->dev;
+	const struct hisi_sas_hw_error *ecc_error;
+	u32 val;
+	int i;
 
-	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
-		dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
-		      HGC_DQE_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
-		dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
-		      HGC_IOST_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
-		dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
-		      HGC_ITCT_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
-		      HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
-		dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
-		      HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
-		dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \
-		       Ram address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
-		      HGC_CQE_ECC_MB_ADDR_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM0_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM1_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
-		dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
-		      HGC_RXM_DFX_STATUS14_MEM2_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
-	}
-
-	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
-		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
-		dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \
-		       memory address is 0x%08X\n",
-		      irq_value,
-		      (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
-		      HGC_RXM_DFX_STATUS15_MEM3_OFF);
-		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
+		ecc_error = &multi_bit_ecc_errors[i];
+		if (irq_value & ecc_error->irq_msk) {
+			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+			val &= ecc_error->msk;
+			val >>= ecc_error->shift;
+			dev_warn(dev, ecc_error->msg, irq_value, val);
+			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+		}
 	}
 
 	return;
@@ -3053,8 +3060,20 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
 			      irq_value);
 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+			u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+			u32 dev_id = reg_val & ITCT_DEV_MSK;
+			struct hisi_sas_device *sas_dev =
+					&hisi_hba->devices[dev_id];
+
+			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+			dev_dbg(dev, "clear ITCT ok\n");
+			complete(sas_dev->completion);
+		}
 	}
 
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
 
 	return IRQ_HANDLED;
@@ -3251,97 +3270,92 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct platform_device *pdev = hisi_hba->platform_dev;
 	struct device *dev = &pdev->dev;
-	int i, irq, rc, irq_map[128];
-
+	int irq, rc, irq_map[128];
+	int i, phy_no, fatal_no, queue_no, k;
 
 	for (i = 0; i < 128; i++)
 		irq_map[i] = platform_get_irq(pdev, i);
 
 	for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
-		int idx = i;
-
-		irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
-		if (!irq) {
-			dev_err(dev, "irq init: fail map phy interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
-
+		irq = irq_map[i + 1]; /* Phy up/down is irq1 */
 		rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
 				      DRV_NAME " phy", hisi_hba);
 		if (rc) {
 			dev_err(dev, "irq init: could not request "
 				"phy interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_phy_int_irqs;
 		}
 	}
 
-	for (i = 0; i < hisi_hba->n_phy; i++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
-		int idx = i + 72; /* First SATA interrupt is irq72 */
-
-		irq = irq_map[idx];
-		if (!irq) {
-			dev_err(dev, "irq init: fail map phy interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 
+		irq = irq_map[phy_no + 72];
 		rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
 				      DRV_NAME " sata", phy);
 		if (rc) {
 			dev_err(dev, "irq init: could not request "
 				"sata interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_sata_int_irqs;
 		}
 	}
 
-	for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
-		int idx = i;
-
-		irq = irq_map[idx + 81];
-		if (!irq) {
-			dev_err(dev, "irq init: fail map fatal interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
-
-		rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+	for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
+		irq = irq_map[fatal_no + 81];
+		rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
 				      DRV_NAME " fatal", hisi_hba);
 		if (rc) {
 			dev_err(dev,
 				"irq init: could not request fatal interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_fatal_int_irqs;
 		}
 	}
 
-	for (i = 0; i < hisi_hba->queue_count; i++) {
-		int idx = i + 96; /* First cq interrupt is irq96 */
-		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+	for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
 		struct tasklet_struct *t = &cq->tasklet;
 
-		irq = irq_map[idx];
-		if (!irq) {
-			dev_err(dev,
-				"irq init: could not map cq interrupt %d\n",
-				idx);
-			return -ENOENT;
-		}
+		irq = irq_map[queue_no + 96];
 		rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
-				      DRV_NAME " cq", &hisi_hba->cq[i]);
+				      DRV_NAME " cq", cq);
 		if (rc) {
 			dev_err(dev,
 				"irq init: could not request cq interrupt %d, rc=%d\n",
 				irq, rc);
-			return -ENOENT;
+			rc = -ENOENT;
+			goto free_cq_int_irqs;
 		}
 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
 	return 0;
+
+free_cq_int_irqs:
+	for (k = 0; k < queue_no; k++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+
+		free_irq(irq_map[k + 96], cq);
+		tasklet_kill(&cq->tasklet);
+	}
+free_fatal_int_irqs:
+	for (k = 0; k < fatal_no; k++)
+		free_irq(irq_map[k + 81], hisi_hba);
+free_sata_int_irqs:
+	for (k = 0; k < phy_no; k++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[k];
+
+		free_irq(irq_map[k + 72], phy);
+	}
+free_phy_int_irqs:
+	for (k = 0; k < i; k++)
+		free_irq(irq_map[k + 1], hisi_hba);
+	return rc;
 }
 
 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
@@ -3383,19 +3397,21 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
 		synchronize_irq(platform_get_irq(pdev, i));
 }
 
+
+static u32 get_phys_state_v2_hw(struct hisi_hba *hisi_hba)
+{
+	return hisi_sas_read32(hisi_hba, PHY_STATE);
+}
+
 static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
-	u32 old_state, state;
 	int rc, cnt;
-	int phy_no;
-
-	old_state = hisi_sas_read32(hisi_hba, PHY_STATE);
 
 	interrupt_disable_v2_hw(hisi_hba);
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
 
-	stop_phys_v2_hw(hisi_hba);
+	hisi_sas_stop_phys(hisi_hba);
 
 	mdelay(10);
 
@@ -3425,22 +3441,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
 
 	phys_reject_stp_links_v2_hw(hisi_hba);
 
-	/* Re-enable the PHYs */
-	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-		struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-		if (sas_phy->enabled)
-			start_phy_v2_hw(hisi_hba, phy_no);
-	}
-
-	/* Wait for the PHYs to come up and read the PHY state */
-	msleep(1000);
-
-	state = hisi_sas_read32(hisi_hba, PHY_STATE);
-
-	hisi_sas_rescan_topology(hisi_hba, old_state, state);
-
 	return 0;
 }
 
@@ -3463,11 +3463,13 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.phy_enable = enable_phy_v2_hw,
 	.phy_disable = disable_phy_v2_hw,
 	.phy_hard_reset = phy_hard_reset_v2_hw,
+	.get_events = phy_get_events_v2_hw,
 	.phy_set_linkrate = phy_set_linkrate_v2_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
 	.soft_reset = soft_reset_v2_hw,
+	.get_phys_state = get_phys_state_v2_hw,
 };
 
 static int hisi_sas_v2_probe(struct platform_device *pdev)
@@ -3491,10 +3493,17 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
 {
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
+	int i;
 
 	if (timer_pending(&hisi_hba->timer))
 		del_timer(&hisi_hba->timer);
 
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+		tasklet_kill(&cq->tasklet);
+	}
+
 	return hisi_sas_remove(pdev);
 }
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 83d2dca1c650c3e0c32568ac1c6fba5505ece351..2e5fa9717be89acdcc3b1565c0b1c85a5ab089c1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -23,14 +23,11 @@
 #define PHY_STATE			0x24
 #define PHY_PORT_NUM_MA			0x28
 #define PHY_CONN_RATE			0x30
-#define AXI_AHB_CLK_CFG			0x3c
 #define ITCT_CLR			0x44
 #define ITCT_CLR_EN_OFF			16
 #define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
 #define ITCT_DEV_OFF			0
 #define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
-#define AXI_USER1			0x48
-#define AXI_USER2			0x4c
 #define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
 #define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
 #define SATA_INITI_D2H_STORE_ADDR_LO	0x60
@@ -137,6 +134,7 @@
 #define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
 #define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
 #define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
+#define STP_LINK_TIMER			(PORT_BASE + 0x120)
 #define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
 #define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
 #define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
@@ -167,6 +165,31 @@
 #define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
 #define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
 #define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
+#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
+#define DMA_TX_STATUS_BUSY_OFF		0
+#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
+#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
+#define DMA_RX_STATUS_BUSY_OFF		0
+#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)
+
+#define MAX_ITCT_HW			4096 /* max the hw can support */
+#define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
+#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
+#error Max ITCT exceeded
+#endif
+
+#define AXI_MASTER_CFG_BASE		(0x5000)
+#define AM_CTRL_GLOBAL			(0x0)
+#define AM_CURR_TRANS_RETURN	(0x150)
+
+#define AM_CFG_MAX_TRANS		(0x5010)
+#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
+#define AXI_CFG					(0x5100)
+#define AM_ROB_ECC_ERR_ADDR		(0x510c)
+#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF	0
+#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
+#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF	8
+#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
 
 /* HW dma structures */
 /* Delivery queue header */
@@ -354,8 +377,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	/* Global registers init */
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
 			 (u32)((1ULL << hisi_hba->queue_count) - 1));
-	hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
-	hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
 	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
 	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
 	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -371,15 +392,14 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
 	for (i = 0; i < hisi_hba->queue_count; i++)
 		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
 
-	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
-	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);
+	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000);
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
@@ -389,7 +409,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
-		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -398,9 +417,11 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
-				     0xa0064);
+				     0xa03e8);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
-				     0xa0064);
+				     0xa03e8);
+		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
+				     0x7f7a120);
 	}
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		/* Delivery queue */
@@ -578,8 +599,6 @@ static void free_device_v3_hw(struct hisi_hba *hisi_hba,
 		memset(itct, 0, sizeof(struct hisi_sas_itct));
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
-		hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
-		hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
 
 		/* clear the itct */
 		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -610,8 +629,52 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
 					1 << CFG_ABT_SET_IPTT_DONE_OFF);
 }
 
+static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+	int ret;
+	u32 val;
+
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+	/* Disable all of the PHYs */
+	hisi_sas_stop_phys(hisi_hba);
+	udelay(50);
+
+	/* Ensure axi bus idle */
+	ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
+			20000, 1000000);
+	if (ret) {
+		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
+		return -EIO;
+	}
+
+	if (ACPI_HANDLE(dev)) {
+		acpi_status s;
+
+		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+		if (ACPI_FAILURE(s)) {
+			dev_err(dev, "Reset failed\n");
+			return -EIO;
+		}
+	} else
+		dev_err(dev, "no reset method!\n");
+
+	return 0;
+}
+
 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 {
+	struct device *dev = hisi_hba->dev;
+	int rc;
+
+	rc = reset_hw_v3_hw(hisi_hba);
+	if (rc) {
+		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	msleep(100);
 	init_reg_v3_hw(hisi_hba);
 
 	return 0;
@@ -640,25 +703,12 @@ static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 	enable_phy_v3_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	disable_phy_v3_hw(hisi_hba, phy_no);
-}
-
-static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		start_phy_v3_hw(hisi_hba, i);
-}
-
 static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	stop_phy_v3_hw(hisi_hba, phy_no);
+	disable_phy_v3_hw(hisi_hba, phy_no);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -675,7 +725,17 @@ enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
 
 static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
 {
-	start_phys_v3_hw(hisi_hba);
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (!sas_phy->phy->enabled)
+			continue;
+
+		start_phy_v3_hw(hisi_hba, i);
+	}
 }
 
 static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1140,7 +1200,6 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 
 static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
-	int res = 0;
 	u32 phy_state, sl_ctrl, txid_auto;
 	struct device *dev = hisi_hba->dev;
 
@@ -1161,7 +1220,7 @@ static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
 
-	return res;
+	return 0;
 }
 
 static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -1259,7 +1318,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
 		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
 			hisi_sas_phy_write32(hisi_hba, phy_no,
 					CHL_INT0, irq_value0
-					& (~CHL_INT0_HOTPLUG_TOUT_MSK)
+					& (~CHL_INT0_SL_RX_BCST_ACK_MSK)
 					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
 					& (~CHL_INT0_NOT_RDY_MSK));
 		}
@@ -1620,6 +1679,104 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
 	return 0;
 }
 
+static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
+		struct sas_phy_linkrates *r)
+{
+	u32 prog_phy_link_rate =
+		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	int i;
+	enum sas_linkrate min, max;
+	u32 rate_mask = 0;
+
+	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = sas_phy->phy->maximum_linkrate;
+		min = r->minimum_linkrate;
+	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = r->maximum_linkrate;
+		min = sas_phy->phy->minimum_linkrate;
+	} else
+		return;
+
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
+	min -= SAS_LINK_RATE_1_5_GBPS;
+	max -= SAS_LINK_RATE_1_5_GBPS;
+
+	for (i = 0; i <= max; i++)
+		rate_mask |= 1 << (i * 2);
+
+	prog_phy_link_rate &= ~0xff;
+	prog_phy_link_rate |= rate_mask;
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+			prog_phy_link_rate);
+
+	phy_hard_reset_v3_hw(hisi_hba, phy_no);
+}
+
+static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct pci_dev *pdev = hisi_hba->pci_dev;
+	int i;
+
+	synchronize_irq(pci_irq_vector(pdev, 1));
+	synchronize_irq(pci_irq_vector(pdev, 2));
+	synchronize_irq(pci_irq_vector(pdev, 11));
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+		synchronize_irq(pci_irq_vector(pdev, i + 16));
+	}
+
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
+		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
+	}
+}
+
+static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
+{
+	return hisi_sas_read32(hisi_hba, PHY_STATE);
+}
+
+static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+	int rc;
+	u32 status;
+
+	interrupt_disable_v3_hw(hisi_hba);
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+
+	hisi_sas_stop_phys(hisi_hba);
+
+	mdelay(10);
+
+	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+
+	/* wait until bus idle */
+	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+	if (rc) {
+		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+		return rc;
+	}
+
+	hisi_sas_init_mem(hisi_hba);
+
+	return hw_init_v3_hw(hisi_hba);
+}
+
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.hw_init = hisi_sas_v3_init,
 	.setup_itct = setup_itct_v3_hw,
@@ -1640,7 +1797,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.phy_disable = disable_phy_v3_hw,
 	.phy_hard_reset = phy_hard_reset_v3_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
+	.phy_set_linkrate = phy_set_linkrate_v3_hw,
 	.dereg_device = dereg_device_v3_hw,
+	.soft_reset = soft_reset_v3_hw,
+	.get_phys_state = get_phys_state_v3_hw,
 };
 
 static struct Scsi_Host *
@@ -1651,8 +1811,10 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 	struct device *dev = &pdev->dev;
 
 	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
-	if (!shost)
-		goto err_out;
+	if (!shost) {
+		dev_err(dev, "shost alloc failed\n");
+		return NULL;
+	}
 	hisi_hba = shost_priv(shost);
 
 	hisi_hba->hw = &hisi_sas_v3_hw;
@@ -1673,6 +1835,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 
 	return shost;
 err_out:
+	scsi_host_put(shost);
 	dev_err(dev, "shost alloc failed\n");
 	return NULL;
 }
@@ -1781,7 +1944,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
-	kfree(shost);
+	scsi_host_put(shost);
 err_out_regions:
 	pci_release_regions(pdev);
 err_out_disable_device:
@@ -1801,6 +1964,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 
 		free_irq(pci_irq_vector(pdev, i+16), cq);
+		tasklet_kill(&cq->tasklet);
 	}
 	pci_free_irq_vectors(pdev);
 }
@@ -1810,14 +1974,16 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	struct device *dev = &pdev->dev;
 	struct sas_ha_struct *sha = dev_get_drvdata(dev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
+	struct Scsi_Host *shost = sha->core.shost;
 
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
-	hisi_sas_free(hisi_hba);
 	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
+	hisi_sas_free(hisi_hba);
+	scsi_host_put(shost);
 }
 
 enum {
@@ -1839,7 +2005,6 @@ static struct pci_driver sas_v3_pci_driver = {
 
 module_pci_driver(sas_v3_pci_driver);
 
-MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f7cdb28bd38fbadc6105906f56fe813346c299c..9abe81021484dc146755d0475a51ccd26231372d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -81,11 +81,8 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
 MODULE_VERSION(HPSA_DRIVER_VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("cciss");
 
-static int hpsa_allow_any;
-module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(hpsa_allow_any,
-		"Allow hpsa driver to access unknown HP Smart Array hardware");
 static int hpsa_simple_mode;
 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(hpsa_simple_mode,
@@ -148,6 +145,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 };
 
@@ -158,6 +157,26 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
  *  access = Address of the struct of function pointers
  */
 static struct board_type products[] = {
+	{0x40700E11, "Smart Array 5300", &SA5A_access},
+	{0x40800E11, "Smart Array 5i", &SA5B_access},
+	{0x40820E11, "Smart Array 532", &SA5B_access},
+	{0x40830E11, "Smart Array 5312", &SA5B_access},
+	{0x409A0E11, "Smart Array 641", &SA5A_access},
+	{0x409B0E11, "Smart Array 642", &SA5A_access},
+	{0x409C0E11, "Smart Array 6400", &SA5A_access},
+	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
+	{0x40910E11, "Smart Array 6i", &SA5A_access},
+	{0x3225103C, "Smart Array P600", &SA5A_access},
+	{0x3223103C, "Smart Array P800", &SA5A_access},
+	{0x3234103C, "Smart Array P400", &SA5A_access},
+	{0x3235103C, "Smart Array P400i", &SA5A_access},
+	{0x3211103C, "Smart Array E200i", &SA5A_access},
+	{0x3212103C, "Smart Array E200", &SA5A_access},
+	{0x3213103C, "Smart Array E200i", &SA5A_access},
+	{0x3214103C, "Smart Array E200i", &SA5A_access},
+	{0x3215103C, "Smart Array E200i", &SA5A_access},
+	{0x3237103C, "Smart Array E500", &SA5A_access},
+	{0x323D103C, "Smart Array P700m", &SA5A_access},
 	{0x3241103C, "Smart Array P212", &SA5_access},
 	{0x3243103C, "Smart Array P410", &SA5_access},
 	{0x3245103C, "Smart Array P410i", &SA5_access},
@@ -278,7 +297,8 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 			       u64 *cfg_offset);
 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 				    unsigned long *memory_bar);
-static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
+				bool *legacy_board);
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
 					   unsigned char lunaddr[],
 					   int reply_queue);
@@ -866,6 +886,16 @@ static ssize_t host_show_ctlr_num(struct device *dev,
 	return snprintf(buf, 20, "%d\n", h->ctlr);
 }
 
+static ssize_t host_show_legacy_board(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -891,6 +921,8 @@ static DEVICE_ATTR(lockup_detected, S_IRUGO,
 	host_show_lockup_detected, NULL);
 static DEVICE_ATTR(ctlr_num, S_IRUGO,
 	host_show_ctlr_num, NULL);
+static DEVICE_ATTR(legacy_board, S_IRUGO,
+	host_show_legacy_board, NULL);
 
 static struct device_attribute *hpsa_sdev_attrs[] = {
 	&dev_attr_raid_level,
@@ -912,6 +944,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 	&dev_attr_raid_offload_debug,
 	&dev_attr_lockup_detected,
 	&dev_attr_ctlr_num,
+	&dev_attr_legacy_board,
 	NULL,
 };
 
@@ -3565,7 +3598,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 	memset(scsi3addr, 0, sizeof(scsi3addr));
 	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
 		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
-		rc = -1;
+		rc = -EAGAIN;
 		goto out;
 	}
 	if (extended_response)
@@ -3578,16 +3611,19 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 	if (ei->CommandStatus != 0 &&
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
 		hpsa_scsi_interpret_error(h, c);
-		rc = -1;
+		rc = -EIO;
 	} else {
 		struct ReportLUNdata *rld = buf;
 
 		if (rld->extended_response_flag != extended_response) {
-			dev_err(&h->pdev->dev,
-				"report luns requested format %u, got %u\n",
-				extended_response,
-				rld->extended_response_flag);
-			rc = -1;
+			if (!h->legacy_board) {
+				dev_err(&h->pdev->dev,
+					"report luns requested format %u, got %u\n",
+					extended_response,
+					rld->extended_response_flag);
+				rc = -EINVAL;
+			} else
+				rc = -EOPNOTSUPP;
 		}
 	}
 out:
@@ -3603,7 +3639,7 @@ static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 
 	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
 				      HPSA_REPORT_PHYS_EXTENDED);
-	if (!rc || !hpsa_allow_any)
+	if (!rc || rc != -EOPNOTSUPP)
 		return rc;
 
 	/* REPORT PHYS EXTENDED is not supported */
@@ -3791,7 +3827,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
-		sizeof(this_device->device_id)))
+		sizeof(this_device->device_id)) < 0)
 		dev_err(&h->pdev->dev,
 			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
 			h->ctlr, __func__,
@@ -3809,6 +3845,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
+		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
+		    h->legacy_board) {
+			/*
+			 * Legacy boards might not support volume status
+			 */
+			dev_info(&h->pdev->dev,
+				 "C0:T%d:L%d Volume status not available, assuming online.\n",
+				 this_device->target, this_device->lun);
+			volume_offline = 0;
+		}
 		this_device->volume_offline = volume_offline;
 		if (volume_offline == HPSA_LV_FAILED) {
 			rc = HPSA_LV_FAILED;
@@ -6571,7 +6617,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		default:
 			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
 			BUG();
-			return -1;
 		}
 	} else if (cmd_type == TYPE_MSG) {
 		switch (cmd) {
@@ -7232,7 +7277,8 @@ static int hpsa_interrupt_mode(struct ctlr_info *h)
 	return 0;
 }
 
-static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
+				bool *legacy_board)
 {
 	int i;
 	u32 subsystem_vendor_id, subsystem_device_id;
@@ -7242,17 +7288,24 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 		    subsystem_vendor_id;
 
+	if (legacy_board)
+		*legacy_board = false;
 	for (i = 0; i < ARRAY_SIZE(products); i++)
-		if (*board_id == products[i].board_id)
+		if (*board_id == products[i].board_id) {
+			if (products[i].access != &SA5A_access &&
+			    products[i].access != &SA5B_access)
+				return i;
+			dev_warn(&pdev->dev,
+				 "legacy board ID: 0x%08x\n",
+				 *board_id);
+			if (legacy_board)
+			    *legacy_board = true;
 			return i;
+		}
 
-	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
-		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
-		!hpsa_allow_any) {
-		dev_warn(&pdev->dev, "unrecognized board ID: "
-			"0x%08x, ignoring.\n", *board_id);
-			return -ENODEV;
-	}
+	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
+	if (legacy_board)
+		*legacy_board = true;
 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
 }
 
@@ -7555,13 +7608,14 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
 static int hpsa_pci_init(struct ctlr_info *h)
 {
 	int prod_index, err;
+	bool legacy_board;
 
-	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
+	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
 	if (prod_index < 0)
 		return prod_index;
 	h->product_name = products[prod_index].product_name;
 	h->access = *(products[prod_index].access);
-
+	h->legacy_board = legacy_board;
 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 
@@ -8241,7 +8295,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (number_of_controllers == 0)
 		printk(KERN_INFO DRIVER_NAME "\n");
 
-	rc = hpsa_lookup_board_id(pdev, &board_id);
+	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
 	if (rc < 0) {
 		dev_warn(&pdev->dev, "Board ID not found\n");
 		return rc;
@@ -9443,14 +9497,6 @@ hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 	return -EINVAL;
 }
 
-/* SMP = Serial Management Protocol */
-static int
-hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-struct request *req)
-{
-	return -EINVAL;
-}
-
 static struct sas_function_template hpsa_sas_transport_functions = {
 	.get_linkerrors = hpsa_sas_get_linkerrors,
 	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
@@ -9460,7 +9506,6 @@ static struct sas_function_template hpsa_sas_transport_functions = {
 	.phy_setup = hpsa_sas_phy_setup,
 	.phy_release = hpsa_sas_phy_release,
 	.set_phy_speed = hpsa_sas_phy_speed,
-	.smp_handler = hpsa_sas_smp_handler,
 };
 
 /*
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 1c49741bc63935ea352ef4ffca65192abd4c6716..018f980a701ce406e9edf0119240c39df47b9858 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -293,6 +293,7 @@ struct ctlr_info {
 	int	drv_req_rescan;
 	int	raid_offload_debug;
 	int     discovery_polling;
+	int     legacy_board;
 	struct  ReportLUNdata *lastlogicals;
 	int	needs_abort_tags_swizzled;
 	struct workqueue_struct *resubmit_wq;
@@ -447,6 +448,23 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
 	}
 }
 
+/*
+ *  Variant of the above; 0x04 turns interrupts off...
+ */
+static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+	if (val) { /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else { /* Turn them off */
+		h->interrupts_enabled = 0;
+		writel(SA5B_INTR_OFF,
+		       h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
 static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
 {
 	if (val) { /* turn on interrupts */
@@ -549,6 +567,14 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
 		true : false;
 }
 
+/*
+ *      Returns true if an interrupt is pending.
+ */
+static bool SA5B_intr_pending(struct ctlr_info *h)
+{
+	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
+}
+
 #define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
 #define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
 #define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
@@ -581,38 +607,53 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 }
 
 static struct access_method SA5_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_intr_mask,
-	.intr_pending = SA5_intr_pending,
-	.command_completed = SA5_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_intr_mask,
+	.intr_pending =		SA5_intr_pending,
+	.command_completed =	SA5_completed,
+};
+
+/* Duplicate entry of the above to mark unsupported boards */
+static struct access_method SA5A_access = {
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_intr_mask,
+	.intr_pending =		SA5_intr_pending,
+	.command_completed =	SA5_completed,
+};
+
+static struct access_method SA5B_access = {
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5B_intr_mask,
+	.intr_pending =		SA5B_intr_pending,
+	.command_completed =	SA5_completed,
 };
 
 static struct access_method SA5_ioaccel_mode1_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_ioaccel_mode1_intr_pending,
-	.command_completed = SA5_ioaccel_mode1_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
+	.command_completed =	SA5_ioaccel_mode1_completed,
 };
 
 static struct access_method SA5_ioaccel_mode2_access = {
-	.submit_command = SA5_submit_command_ioaccel2,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command_ioaccel2,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	.submit_command = SA5_submit_command,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access_no_read = {
-	.submit_command = SA5_submit_command_no_read,
-	.set_intr_mask = SA5_performant_intr_mask,
-	.intr_pending = SA5_performant_intr_pending,
-	.command_completed = SA5_performant_completed,
+	.submit_command =	SA5_submit_command_no_read,
+	.set_intr_mask =	SA5_performant_intr_mask,
+	.intr_pending =		SA5_performant_intr_pending,
+	.command_completed =	SA5_performant_completed,
 };
 
 struct board_type {
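
For context on the SA5A/SA5B tables above: the rest of hpsa never calls these register helpers directly, it dispatches through the per-board access_method selected by hpsa_lookup_board_id(), so legacy boards transparently pick up the 0x04-based interrupt mask. A minimal sketch of that dispatch; the HPSA_INTR_OFF constant and the quiesce helper are assumptions for illustration, not part of this patch:

	/* Sketch only: dispatch through the board's access_method table. */
	static void example_quiesce(struct ctlr_info *h)
	{
		/* resolves to SA5_intr_mask or SA5B_intr_mask per board */
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		/* resolves to SA5_intr_pending or SA5B_intr_pending */
		if (h->access.intr_pending(h))
			(void) h->access.command_completed(h, 0);
	}
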
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 7226226f7383533b7bff3b5ecad922d34beb0ba0..2fad7f03aa02089458f5729330b322444ea0c72c 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1106,12 +1106,10 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 
 static int hptiop_reset(struct scsi_cmnd *scp)
 {
-	struct Scsi_Host * host = scp->device->host;
-	struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata;
+	struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata;
 
-	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
-			scp->device->host->host_no, scp->device->channel,
-			scp->device->id, scp);
+	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
+	       scp->device->host->host_no, -1, -1);
 
 	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
 }
@@ -1179,8 +1177,7 @@ static struct scsi_host_template driver_template = {
 	.module                     = THIS_MODULE,
 	.name                       = driver_name,
 	.queuecommand               = hptiop_queuecommand,
-	.eh_device_reset_handler    = hptiop_reset,
-	.eh_bus_reset_handler       = hptiop_reset,
+	.eh_host_reset_handler      = hptiop_reset,
 	.info                       = hptiop_info,
 	.emulated                   = 0,
 	.use_clustering             = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc4e05be8d4a24aa01782a62ca2fd3d157d24777..b491af31a5f8e0289c1a3dea2f1a588ff4a3796e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2528,16 +2528,12 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int rc, block_rc;
+	int rc;
 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-	block_rc = fc_block_scsi_eh(cmd);
 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
 
-	if (block_rc == FAST_IO_FAIL)
-		return FAST_IO_FAIL;
-
 	return rc ? FAILED : SUCCESS;
 }
 
@@ -4929,7 +4925,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
 }
 
-static struct vio_device_id ibmvfc_device_table[] = {
+static const struct vio_device_id ibmvfc_device_table[] = {
 	{"fcp", "IBM,vfc-client"},
 	{ "", "" }
 };
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index da22b3665cb04efb0a24298ef3b70964920806e6..7d156b161482988367ac323f8182d177421d1163 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2330,7 +2330,7 @@ static int ibmvscsi_resume(struct device *dev)
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
-static struct vio_device_id ibmvscsi_device_table[] = {
+static const struct vio_device_id ibmvscsi_device_table[] = {
 	{"vscsi", "IBM,v-scsi"},
 	{ "", "" }
 };
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 1f75d0380516fb661b0332ada1255aeb7055a866..785fb42f66502a40e005c912d1cf9022a86ddb62 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -4086,7 +4086,7 @@ static struct class ibmvscsis_class = {
 	.dev_groups	= ibmvscsis_dev_groups,
 };
 
-static struct vio_device_id ibmvscsis_device_table[] = {
+static const struct vio_device_id ibmvscsis_device_table[] = {
 	{ "v-scsi-host", "IBM,v-scsi-host" },
 	{ "", "" }
 };
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 9164ce1249c107fd2460d0fd9b83daa6f178584b..87c94191033ba399515f620a39d74e995fdef69f 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1106,7 +1106,6 @@ static struct scsi_host_template imm_template = {
 	.name			= "Iomega VPI2 (imm) interface",
 	.queuecommand		= imm_queuecommand,
 	.eh_abort_handler	= imm_abort,
-	.eh_bus_reset_handler	= imm_reset,
 	.eh_host_reset_handler	= imm_reset,
 	.bios_param		= imm_biosparam,
 	.this_id		= 7,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 45371179ab872d53f433a62e5d0e6ed80dfdb674..922e3e56c90d945a19a81faab8d7ff8c239d3448 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -166,7 +166,7 @@ static struct scsi_host_template isci_sht = {
 	.use_clustering			= ENABLE_CLUSTERING,
 	.eh_abort_handler		= sas_eh_abort_handler,
 	.eh_device_reset_handler        = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler           = sas_eh_bus_reset_handler,
+	.eh_target_reset_handler        = sas_eh_target_reset_handler,
 	.target_destroy			= sas_target_destroy,
 	.ioctl				= sas_ioctl,
 	.shost_attrs			= isci_host_attrs,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4842fc0e809d1cf6846ba531165953f1dd11f993..4d934d6c3e13adb26a91fec4dc665ed1f3f45bcb 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -163,7 +163,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
 	struct iscsi_tcp_conn *tcp_conn;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
 	struct iscsi_conn *conn;
-	struct iscsi_session *session;
 	void (*old_state_change)(struct sock *);
 
 	read_lock_bh(&sk->sk_callback_lock);
@@ -172,7 +171,6 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
 		read_unlock_bh(&sk->sk_callback_lock);
 		return;
 	}
-	session = conn->session;
 
 	iscsi_sw_sk_state_check(sk);
 
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 234352da5c3cebeff0d523bbe3842d297ddd8272..772c35a5c49ef7899ce0ef65417f566390fea712 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2222,8 +2222,6 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
 	FC_SCSI_DBG(lport, "Resetting host\n");
 
-	fc_block_scsi_eh(sc_cmd);
-
 	fc_lport_reset(lport);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
 	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 42381adf076991d4e2fb4ed2a11e18162e93df25..bd4605a34f54d655a09471243c8d4386b9e78b3d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1078,7 +1078,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (opcode != ISCSI_OP_NOOP_OUT)
 			return 0;
 
-		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
+		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
 			/*
 			 * nop-out in response to target's nop-out rejected.
 			 * Just resend.
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 9dafe64e7c7ace76e25aaabcb21d06c39b4241b1..13739bfacc6797822d05af8b16aae72c8fcab3ed 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -26,6 +26,7 @@ config SCSI_SAS_LIBSAS
 	tristate "SAS Domain Transport Attributes"
 	depends on SCSI
 	select SCSI_SAS_ATTRS
+	select BLK_DEV_BSGLIB
 	help
 	  This provides transport specific helpers for SAS drivers which
 	  use the domain device construct (like the aic94xxx).
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 87f5e694dbedd8a7eda74c1a5dccd0e9008e8c93..70be4425ae0be495c0d08d3408a20c3c7d90d6a7 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -343,6 +343,7 @@ static int smp_ata_check_ready(struct ata_link *link)
 	case SAS_END_DEVICE:
 		if (ex_phy->attached_sata_dev)
 			return sas_ata_clear_pending(dev, ex_phy);
+		/* fall through */
 	default:
 		return -ENODEV;
 	}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 570b2cb2da43921f8fc65cae4235ca3bb0ee6748..6b4fd2375178588b5d6b899ae32da25766e0f3dd 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -64,8 +64,8 @@ static void smp_task_done(struct sas_task *task)
 /* Give it some long enough timeout. In seconds. */
 #define SMP_TIMEOUT 10
 
-static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
-			    void *resp, int resp_size)
+static int smp_execute_task_sg(struct domain_device *dev,
+		struct scatterlist *req, struct scatterlist *resp)
 {
 	int res, retry;
 	struct sas_task *task = NULL;
@@ -86,8 +86,8 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 		}
 		task->dev = dev;
 		task->task_proto = dev->tproto;
-		sg_init_one(&task->smp_task.smp_req, req, req_size);
-		sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+		task->smp_task.smp_req = *req;
+		task->smp_task.smp_resp = *resp;
 
 		task->task_done = smp_task_done;
 
@@ -151,6 +151,17 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
 	return res;
 }
 
+static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
+			    void *resp, int resp_size)
+{
+	struct scatterlist req_sg;
+	struct scatterlist resp_sg;
+
+	sg_init_one(&req_sg, req, req_size);
+	sg_init_one(&resp_sg, resp, resp_size);
+	return smp_execute_task_sg(dev, &req_sg, &resp_sg);
+}
+
 /* ---------- Allocations ---------- */
 
 static inline void *alloc_smp_req(int size)
@@ -2130,57 +2141,50 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
 	return res;
 }
 
-int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-		    struct request *req)
+void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy)
 {
 	struct domain_device *dev;
-	int ret, type;
-	struct request *rsp = req->next_rq;
-
-	if (!rsp) {
-		printk("%s: space for a smp response is missing\n",
-		       __func__);
-		return -EINVAL;
-	}
+	unsigned int reslen = 0;
+	int ret = -EINVAL;
 
 	/* no rphy means no smp target support (ie aic94xx host) */
 	if (!rphy)
-		return sas_smp_host_handler(shost, req, rsp);
-
-	type = rphy->identify.device_type;
+		return sas_smp_host_handler(job, shost);
 
-	if (type != SAS_EDGE_EXPANDER_DEVICE &&
-	    type != SAS_FANOUT_EXPANDER_DEVICE) {
+	switch (rphy->identify.device_type) {
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		break;
+	default:
 		printk("%s: can we send a smp request to a device?\n",
 		       __func__);
-		return -EINVAL;
+		goto out;
 	}
 
 	dev = sas_find_dev_by_rphy(rphy);
 	if (!dev) {
 		printk("%s: fail to find a domain_device?\n", __func__);
-		return -EINVAL;
+		goto out;
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_multiple_segments(req->bio) ||
-	    bio_multiple_segments(rsp->bio)) {
+	if (job->request_payload.sg_cnt > 1 ||
+	    job->reply_payload.sg_cnt > 1) {
 		printk("%s: multiple segments req %u, rsp %u\n",
-		       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
-		return -EINVAL;
+		       __func__, job->request_payload.payload_len,
+		       job->reply_payload.payload_len);
+		goto out;
 	}
 
-	ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
-			       bio_data(rsp->bio), blk_rq_bytes(rsp));
+	ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
+			job->reply_payload.sg_list);
 	if (ret > 0) {
 		/* positive number is the untransferred residual */
-		scsi_req(rsp)->resid_len = ret;
-		scsi_req(req)->resid_len = 0;
+		reslen = ret;
 		ret = 0;
-	} else if (ret == 0) {
-		scsi_req(rsp)->resid_len = 0;
-		scsi_req(req)->resid_len = 0;
 	}
 
-	return ret;
+out:
+	bsg_job_done(job, ret, reslen);
 }
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 45cbbc44f4d7d9d74923fbccc813294aa4c73b9e..9ead93df3a6e21621b00d067a0b5aef06eb8c5d8 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -225,47 +225,36 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 }
 
-int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
-			 struct request *rsp)
+void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost)
 {
-	u8 *req_data = NULL, *resp_data = NULL, *buf;
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+	u8 *req_data, *resp_data;
+	unsigned int reslen = 0;
 	int error = -EINVAL;
 
 	/* eight is the minimum size for request and response frames */
-	if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
+	if (job->request_payload.payload_len < 8 ||
+	    job->reply_payload.payload_len < 8)
 		goto out;
 
-	if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
-	    bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
-		shost_printk(KERN_ERR, shost,
-			"SMP request/response frame crosses page boundary");
+	error = -ENOMEM;
+	req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+	if (!req_data)
 		goto out;
-	}
-
-	req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, req_data,
+			  job->request_payload.payload_len);
 
 	/* make sure frame can always be built ... we copy
 	 * back only the requested length */
-	resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
-
-	if (!req_data || !resp_data) {
-		error = -ENOMEM;
-		goto out;
-	}
-
-	local_irq_disable();
-	buf = kmap_atomic(bio_page(req->bio));
-	memcpy(req_data, buf, blk_rq_bytes(req));
-	kunmap_atomic(buf - bio_offset(req->bio));
-	local_irq_enable();
+	resp_data = kzalloc(max(job->reply_payload.payload_len, 128U),
+			GFP_KERNEL);
+	if (!resp_data)
+		goto out_free_req;
 
+	error = -EINVAL;
 	if (req_data[0] != SMP_REQUEST)
-		goto out;
-
-	/* always succeeds ... even if we can't process the request
-	 * the result is in the response frame */
-	error = 0;
+		goto out_free_resp;
 
 	/* set up default don't know response */
 	resp_data[0] = SMP_RESPONSE;
@@ -274,20 +263,18 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 
 	switch (req_data[1]) {
 	case SMP_REPORT_GENERAL:
-		scsi_req(req)->resid_len -= 8;
-		scsi_req(rsp)->resid_len -= 32;
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 		resp_data[9] = sas_ha->num_phys;
+		reslen = 32;
 		break;
 
 	case SMP_REPORT_MANUF_INFO:
-		scsi_req(req)->resid_len -= 8;
-		scsi_req(rsp)->resid_len -= 64;
 		resp_data[2] = SMP_RESP_FUNC_ACC;
 		memcpy(resp_data + 12, shost->hostt->name,
 		       SAS_EXPANDER_VENDOR_ID_LEN);
 		memcpy(resp_data + 20, "libsas virt phy",
 		       SAS_EXPANDER_PRODUCT_ID_LEN);
+		reslen = 64;
 		break;
 
 	case SMP_READ_GPIO_REG:
@@ -295,14 +282,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_DISCOVER:
-		scsi_req(req)->resid_len -= 16;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 56;
+		if (job->request_payload.payload_len < 16)
+			goto out_free_resp;
 		sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
+		reslen = 56;
 		break;
 
 	case SMP_REPORT_PHY_ERR_LOG:
@@ -311,14 +294,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_REPORT_PHY_SATA:
-		scsi_req(req)->resid_len -= 16;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 60;
+		if (job->request_payload.payload_len < 16)
+			goto out_free_resp;
 		sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
+		reslen = 60;
 		break;
 
 	case SMP_REPORT_ROUTE_INFO:
@@ -330,16 +309,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		const int base_frame_size = 11;
 		int to_write = req_data[4];
 
-		if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
-		    scsi_req(req)->resid_len < base_frame_size + to_write * 4) {
+		if (job->request_payload.payload_len <
+				base_frame_size + to_write * 4) {
 			resp_data[2] = SMP_RESP_INV_FRM_LEN;
 			break;
 		}
 
 		to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
 						   req_data[3], to_write, &req_data[8]);
-		scsi_req(req)->resid_len -= base_frame_size + to_write * 4;
-		scsi_req(rsp)->resid_len -= 8;
+		reslen = 8;
 		break;
 	}
 
@@ -348,16 +326,12 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_PHY_CONTROL:
-		scsi_req(req)->resid_len -= 44;
-		if ((int)scsi_req(req)->resid_len < 0) {
-			scsi_req(req)->resid_len = 0;
-			error = -EINVAL;
-			goto out;
-		}
-		scsi_req(rsp)->resid_len -= 8;
+		if (job->request_payload.payload_len < 44)
+			goto out_free_resp;
 		sas_phy_control(sas_ha, req_data[9], req_data[10],
 				req_data[32] >> 4, req_data[33] >> 4,
 				resp_data);
+		reslen = 8;
 		break;
 
 	case SMP_PHY_TEST_FUNCTION:
@@ -369,15 +343,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 	}
 
-	local_irq_disable();
-	buf = kmap_atomic(bio_page(rsp->bio));
-	memcpy(buf, resp_data, blk_rq_bytes(rsp));
-	flush_kernel_dcache_page(bio_page(rsp->bio));
-	kunmap_atomic(buf - bio_offset(rsp->bio));
-	local_irq_enable();
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+			    job->reply_payload.sg_cnt, resp_data,
+			    job->reply_payload.payload_len);
 
- out:
-	kfree(req_data);
+	error = 0;
+out_free_resp:
 	kfree(resp_data);
-	return error;
+out_free_req:
+	kfree(req_data);
+out:
+	bsg_job_done(job, error, reslen);
 }
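
Both conversions above (sas_smp_handler and sas_smp_host_handler) follow the same bsg_job shape: copy the request payload out of the scatterlist into a bounce buffer, build the reply, copy it back, and report the result and reply length through bsg_job_done(). A stripped-down sketch of that shape; do_work() is purely hypothetical:

	#include <linux/bsg-lib.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static void example_bsg_handler(struct bsg_job *job)
	{
		unsigned int reqlen = job->request_payload.payload_len;
		unsigned int rsplen = job->reply_payload.payload_len;
		u8 *req, *rsp;
		int error = -ENOMEM;

		req = kzalloc(reqlen, GFP_KERNEL);
		rsp = kzalloc(rsplen, GFP_KERNEL);
		if (!req || !rsp)
			goto done;

		/* gather the request from however many sg entries it spans */
		sg_copy_to_buffer(job->request_payload.sg_list,
				  job->request_payload.sg_cnt, req, reqlen);

		error = do_work(req, reqlen, rsp, rsplen);	/* hypothetical */

		/* scatter the reply back */
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt, rsp, rsplen);
	done:
		kfree(rsp);	/* kfree(NULL) is a no-op */
		kfree(req);
		bsg_job_done(job, error, rsplen);
	}
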
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index a216c957b639a3a2113b8d47176fbcdc2ad3b1ac..c07e0813649153d609f52e166c39b51a032a9e49 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -81,6 +81,8 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
 
+void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy);
 int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 			enum phy_func phy_func, struct sas_phy_linkrates *);
 int sas_smp_get_phy_events(struct sas_phy *phy);
@@ -98,16 +100,14 @@ void sas_hae_reset(struct work_struct *work);
 void sas_free_device(struct kref *kref);
 
 #ifdef CONFIG_SCSI_SAS_HOST_SMP
-extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
-				struct request *rsp);
+extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
 #else
-static inline int sas_smp_host_handler(struct Scsi_Host *shost,
-				       struct request *req,
-				       struct request *rsp)
+static inline void sas_smp_host_handler(struct bsg_job *job,
+		struct Scsi_Host *shost)
 {
 	shost_printk(KERN_ERR, shost,
 		"Cannot send SMP to a sas host (not enabled in CONFIG)\n");
-	return -EINVAL;
+	bsg_job_done(job, -EINVAL, 0);
 }
 #endif
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 87e5079d816b949c253487f764d349a39354ae4a..ea8ad06ff582eef292a0b11d42248502aaef8b33 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -526,7 +526,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	return FAILED;
 }
 
-int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
 	int res;
 	struct Scsi_Host *host = cmd->device->host;
@@ -554,15 +554,15 @@ static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
 	struct Scsi_Host *shost = cmd->device->host;
 
 	if (!shost->hostt->eh_device_reset_handler)
-		goto try_bus_reset;
+		goto try_target_reset;
 
 	res = shost->hostt->eh_device_reset_handler(cmd);
 	if (res == SUCCESS)
 		return res;
 
-try_bus_reset:
-	if (shost->hostt->eh_bus_reset_handler)
-		return shost->hostt->eh_bus_reset_handler(cmd);
+try_target_reset:
+	if (shost->hostt->eh_target_reset_handler)
+		return shost->hostt->eh_target_reset_handler(cmd);
 
 	return FAILED;
 }
@@ -855,7 +855,6 @@ int sas_target_alloc(struct scsi_target *starget)
 int sas_slave_configure(struct scsi_device *scsi_dev)
 {
 	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-	struct sas_ha_struct *sas_ha;
 
 	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
 
@@ -864,8 +863,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
 		return 0;
 	}
 
-	sas_ha = dev->port->ha;
-
 	sas_read_port_mode_page(scsi_dev);
 
 	if (scsi_dev->tagged_supported) {
@@ -996,6 +993,6 @@ EXPORT_SYMBOL_GPL(sas_bios_param);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
-EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
+EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
 EXPORT_SYMBOL_GPL(sas_target_destroy);
 EXPORT_SYMBOL_GPL(sas_ioctl);
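
Since sas_eh_bus_reset_handler is gone, every libsas LLDD wires the renamed export into its scsi_host_template the same way the isci hunk earlier does; with a hypothetical driver the wiring is simply:

	#include <scsi/scsi_host.h>
	#include <scsi/libsas.h>

	static struct scsi_host_template example_sht = {
		.module				= THIS_MODULE,
		.name				= "example",
		.eh_device_reset_handler	= sas_eh_device_reset_handler,
		/* replaces the old .eh_bus_reset_handler entry */
		.eh_target_reset_handler	= sas_eh_target_reset_handler,
	};
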
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 562dc0139735e2742e681de9b7638825e2880b86..8eb3f96fe06896ca091e634cf55b8e76819d0dec 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -733,7 +733,6 @@ struct lpfc_hba {
 	uint32_t fc_rttov;	/* R_T_TOV timer value */
 	uint32_t fc_altov;	/* AL_TOV timer value */
 	uint32_t fc_crtov;	/* C_R_TOV timer value */
-	uint32_t fc_citov;	/* C_I_TOV timer value */
 
 	struct serv_parm fc_fabparam;	/* fabric service parameters buffer */
 	uint8_t alpa_map[128];	/* AL_PA map from READ_LA */
@@ -757,6 +756,7 @@ struct lpfc_hba {
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
 	uint32_t initial_imax;
+	uint8_t  bbcredit_support;
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
@@ -836,6 +836,7 @@ struct lpfc_hba {
 	uint32_t cfg_enable_SmartSAN;
 	uint32_t cfg_enable_mds_diags;
 	uint32_t cfg_enable_fc4_type;
+	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
 	uint32_t cfg_xri_split;
 #define LPFC_ENABLE_FCP  1
 #define LPFC_ENABLE_NVME 2
@@ -946,14 +947,14 @@ struct lpfc_hba {
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;
 
-	/* pci_mem_pools */
-	struct pci_pool *lpfc_sg_dma_buf_pool;
-	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
-	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
-	struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
-	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
-	struct pci_pool *txrdy_payload_pool;
+	/* dma_mem_pools */
+	struct dma_pool *lpfc_sg_dma_buf_pool;
+	struct dma_pool *lpfc_mbuf_pool;
+	struct dma_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct dma_pool *txrdy_payload_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7ee1a94c0b33eefd57a6889df66649477ad4713b..c17677f494afe7f9baf741732511c62d4ea0bde7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -247,13 +247,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_abort_rsp),
 				atomic_read(&tgtp->xmt_abort_rsp_error));
 
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		tot = phba->sli4_hba.nvmet_xri_cnt -
-			(phba->sli4_hba.nvmet_ctx_get_cnt +
-			phba->sli4_hba.nvmet_ctx_put_cnt);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
 		len += snprintf(buf + len, PAGE_SIZE - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
@@ -1893,6 +1890,36 @@ static inline bool lpfc_rangecheck(uint val, uint min, uint max)
 	return val >= min && val <= max;
 }
 
+/**
+ * lpfc_enable_bbcr_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the value against the min and max bounds and sets the
+ * adapter config field if it is in the valid range. Prints an error
+ * message and does not set the parameter if the value is invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static ssize_t
+lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
+{
+	if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3068 %s_enable_bbcr changed from %d to %d\n",
+				LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+		phba->cfg_enable_bbcr = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
+			LPFC_DRIVER_NAME, val);
+	return -EINVAL;
+}
+
 /**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
@@ -5116,6 +5143,14 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
  */
 LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
+/*
+ * lpfc_enable_bbcr: Enable BB Credit Recovery
+ *       0  = BB Credit Recovery disabled
+ *       1  = BB Credit Recovery enabled (default)
+ * Value range is [0,1]. Default value is 1.
+ */
+LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+
 struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_nvme_info,
 	&dev_attr_bg_info,
@@ -5223,6 +5258,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_protocol,
 	&dev_attr_lpfc_xlane_supported,
 	&dev_attr_lpfc_enable_mds_diags,
+	&dev_attr_lpfc_enable_bbcr,
 	NULL,
 };
 
@@ -6234,11 +6270,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
 
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		/* NVME only supported on SLI4 */
 		phba->nvmet_support = 0;
 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+		phba->cfg_enable_bbcr = 0;
 	} else {
 		/* We MUST have FCP support */
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index d56dafcdd563b6d762079173590c6136ed6f7927..931db52692f50032d2067e7a9946266a4e792134 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -46,6 +46,16 @@ lpfc_param_store(name)\
 static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		   lpfc_##name##_show, lpfc_##name##_store)
 
+#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0444);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, 0444 | 0644,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
 #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
 static uint lpfc_##name = defval;\
 module_param(lpfc_##name, uint, S_IRUGO);\
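
One quirk in LPFC_BBCR_ATTR_RW is the DEVICE_ATTR mode written as "0444 | 0644", which simply evaluates to 0644; the module parameter itself stays read-only at 0444. Ignoring the bodies that lpfc_param_show/init/store generate, the macro expands for enable_bbcr roughly as follows (a sketch, not literal preprocessor output):

	static uint lpfc_enable_bbcr = 1;
	module_param(lpfc_enable_bbcr, uint, 0444);
	MODULE_PARM_DESC(lpfc_enable_bbcr, "Enable BBC Recovery");
	/* lpfc_param_show/init/store emit lpfc_enable_bbcr_show(),
	 * lpfc_enable_bbcr_init() and lpfc_enable_bbcr_store(), which pair
	 * with the hand-written lpfc_enable_bbcr_set() range checker. */
	static DEVICE_ATTR(lpfc_enable_bbcr, 0644,
			   lpfc_enable_bbcr_show, lpfc_enable_bbcr_store);
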
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a1686c2d863c546d1d30342dd0a75200b82ff9b1..fe9e1c079c20fc1b66e9124bd71c94a5b8356bc8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2384,20 +2384,17 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
 		goto job_error;
 
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!pmboxq) {
-		rc = -ENOMEM;
+	if (!pmboxq)
 		goto link_diag_test_exit;
-	}
 
 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
 		   sizeof(struct lpfc_sli4_cfg_mhdr));
 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
 				     req_len, LPFC_SLI4_MBX_EMBED);
-	if (alloc_len != req_len) {
-		rc = -ENOMEM;
+	if (alloc_len != req_len)
 		goto link_diag_test_exit;
-	}
+
 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
 	       phba->sli4_hba.lnk_info.lnk_no);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index da669dce12feb09c6df8564d16f3b0a3fc14c150..7e300734b345443bace5dd76de1441ca7d254077 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -556,9 +556,8 @@ int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
 void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
 void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
 			struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
-void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
-			struct lpfc_sli_ring *pring,
-			struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
+				struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
 void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 				struct lpfc_iocbq *cmdiocb,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9c0c1463057d5a781295976789ac27f6089b02ab..33417681f5d40784ffc15517f1ed137f2c6f23e3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -955,7 +955,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
 		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
 		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
 				 did, fc4_data_0, fc4_data_1);
 
@@ -969,7 +969,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
 				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "3064 Setting ndlp %p, DID x%06x with "
 					 "FC4 x%08x, Data: x%08x x%08x\n",
 					 ndlp, did, ndlp->nlp_fc4_type,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 744f3f395b64852a294a9300adb64a496087aed7..d50c481ec41ccb0f8e3258c16dd9ec5897a6669c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -851,13 +851,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		}
 
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		tot = phba->sli4_hba.nvmet_xri_cnt -
-			(phba->sli4_hba.nvmet_ctx_get_cnt +
-			phba->sli4_hba.nvmet_ctx_put_cnt);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
 		len += snprintf(buf + len, size - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 7b7d314af0e0878f4a37eef337ebf29de84a3d1c..c4edd87bfc658e5e4d2511031c87044b44f22d59 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -478,16 +478,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 		return;
 
 	for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
-		eq = phba->sli4_hba.hba_eq[eqidx];
-		if (cq->assoc_qid == eq->queue_id)
+		if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
 			break;
 	}
 	if (eqidx == phba->io_channel_irqs) {
 		pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
 		eqidx = 0;
-		eq = phba->sli4_hba.hba_eq[0];
 	}
 
+	eq = phba->sli4_hba.hba_eq[eqidx];
+
 	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
 		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
 			"->EQ[Idx:%d|Qid:%d]:\n",
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 094c97b9e5f741faba5e0ef904f97faa3cc7a734..f9a566eaef0442258a2bd27a6093da76909ecfa4 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -159,6 +159,7 @@ struct lpfc_node_rrq {
 #define NLP_RNID_SND       0x00000400	/* sent RNID request for this entry */
 #define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
 #define NLP_NVMET_RECOV    0x00001000   /* NVMET auditing node for recovery. */
+#define NLP_FCP_PRLI_RJT   0x00002000   /* Rport does not support FCP PRLI. */
 #define NLP_DEFER_RM       0x00010000	/* Remove this ndlp if no longer used */
 #define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
 #define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d1d6f691df4f206736f0e88d0f45020806bd966..468a66371de9d0c30f6e522641e42677156701cd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1527,6 +1527,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	uint8_t  name[sizeof(struct lpfc_name)];
 	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
 	uint16_t keep_nlp_state;
+	struct lpfc_nvme_rport *keep_nrport = NULL;
 	int  put_node;
 	int  put_rport;
 	unsigned long *active_rrqs_xri_bitmap = NULL;
@@ -1624,6 +1625,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 	keep_nlp_state = new_ndlp->nlp_state;
 	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
 
+	/* interchange the nvme remoteport structs */
+	keep_nrport = new_ndlp->nrport;
+	new_ndlp->nrport = ndlp->nrport;
+
 	/* Move this back to NPR state */
 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
 		/* The new_ndlp is replacing ndlp totally, so we need
@@ -1646,6 +1651,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			}
 			new_ndlp->nlp_type = ndlp->nlp_type;
 		}
+
+		/* Fix up the nvme rport */
+		if (ndlp->nrport) {
+			ndlp->nrport = NULL;
+			lpfc_nlp_put(ndlp);
+		}
+
 		/* We shall actually free the ndlp with both nlp_DID and
 		 * nlp_portname fields equals 0 to avoid any ndlp on the
 		 * nodelist never to be used.
@@ -1690,6 +1702,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			keep_nlp_state = NLP_STE_NPR_NODE;
 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
 
+		/* Previous ndlp no longer active with nvme host transport.
+		 * Remove reference from earlier registration unless the
+		 * nvme host took care of it.
+		 */
+		if (ndlp->nrport)
+			lpfc_nlp_put(ndlp);
+		ndlp->nrport = keep_nrport;
+
 		/* Fix up the rport accordingly */
 		rport = ndlp->rport;
 		if (rport) {
@@ -1966,6 +1986,7 @@ int
 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 {
 	struct lpfc_hba  *phba = vport->phba;
+	struct Scsi_Host *shost;
 	struct serv_parm *sp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
@@ -1984,6 +2005,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	if (!elsiocb)
 		return 1;
 
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
+	spin_unlock_irq(shost->host_lock);
+
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 	/* For PLOGI request, remainder of payload is service parameters */
@@ -2007,6 +2033,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 
 	sp->cmn.valid_vendor_ver_level = 0;
 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
+	sp->cmn.bbRcvSizeMsb &= 0xF;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 		"Issue PLOGI:     did:x%x",
@@ -2151,6 +2178,16 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	uint16_t cmdsize;
 	u32 local_nlp_type, elscmd;
 
+	/*
+	 * If we are in RSCN mode, the FC4 types supported from a
+	 * previous GFT_ID command may not be accurate. So, if we
+	 * are an NVME Initiator, always look for the possibility of
+	 * the remote NPort being an NVME Target.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    vport->fc_flag & FC_RSCN_MODE &&
+	    vport->nvmei_support)
+		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 	local_nlp_type = ndlp->nlp_fc4_type;
 
  send_next_prli:
@@ -3420,8 +3457,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				maxretry = 3;
 				delay = 1000;
 				retry = 1;
-				break;
+			} else if (cmd == ELS_CMD_FLOGI &&
+				   stat.un.b.lsRjtRsnCodeExp ==
+						LSEXP_NOTHING_MORE) {
+				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+				retry = 1;
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0820 FLOGI Failed (x%x). "
+						 "BBCredit Not Supported\n",
+						 stat.un.lsRjtError);
 			}
+			break;
+
 		case LSRJT_PROTOCOL_ERR:
 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 			  (cmd == ELS_CMD_FDISC) &&
@@ -3442,6 +3489,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				goto out_retry;
 			}
 			break;
+		case LSRJT_CMD_UNSUPPORTED:
+			/* lpfc nvmet returns this type of LS_RJT when it
+			 * receives an FCP PRLI because lpfc nvmet only
+			 * supports NVME.  ELS request is terminated for FCP4
+			 * on this rport.
+			 */
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
+				spin_unlock_irq(shost->host_lock);
+				retry = 0;
+				goto out_retry;
+			}
+			break;
 		}
 		break;
 
@@ -3930,7 +3992,25 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (mbox) {
 		if ((rspiocb->iocb.ulpStatus == 0)
 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
-			lpfc_unreg_rpi(vport, ndlp);
+			if (!lpfc_unreg_rpi(vport, ndlp) &&
+			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
+			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0314 PLOGI recov DID x%x "
+					"Data: x%x x%x x%x\n",
+					ndlp->nlp_DID, ndlp->nlp_state,
+					ndlp->nlp_rpi, ndlp->nlp_flag);
+				mp = mbox->context1;
+				if (mp) {
+					lpfc_mbuf_free(phba, mp->virt,
+						       mp->phys);
+					kfree(mp);
+				}
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+
 			/* Increment reference count to ndlp to hold the
 			 * reference to ndlp for the callback function.
 			 */
@@ -4132,6 +4212,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 			sp->cmn.valid_vendor_ver_level = 0;
 			memset(sp->un.vendorVersion, 0,
 			       sizeof(sp->un.vendorVersion));
+			sp->cmn.bbRcvSizeMsb &= 0xF;
 
 			/* If our firmware supports this feature, convey that
 			 * info to the target using the vendor specific field.
@@ -7989,6 +8070,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
+
+		/* NVMET accepts NVME PRLI only.  Reject FCP PRLI */
+		if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
+			rjt_err = LSRJT_CMD_UNSUPPORTED;
+			rjt_exp = LSEXP_REQ_UNSUPPORTED;
+			break;
+		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
 		break;
 	case ELS_CMD_LIRR:
@@ -8784,6 +8872,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	pcmd += sizeof(uint32_t); /* Node Name */
 	pcmd += sizeof(uint32_t); /* Node Name */
 	memcpy(pcmd, &vport->fc_nodename, 8);
+	sp->cmn.valid_vendor_ver_level = 0;
 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
 	lpfc_set_disctmo(vport);
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index aa5e5ff56dfba57e0d7f98ebe39b140e2dbcbfce..20808349a80e9908d6d535b5107aa50882c0f553 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1108,6 +1108,7 @@ void
 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
+	uint8_t bbscn = 0;
 
 	if (pmb->u.mb.mbxStatus)
 		goto out;
@@ -1134,10 +1135,17 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI)
+	if (vport->port_state != LPFC_FLOGI) {
+		if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+			bbscn = bf_get(lpfc_bbscn_def,
+				       &phba->sli4_hba.bbscn_params);
+			vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+			vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
+		}
 		lpfc_initial_flogi(vport);
-	else if (vport->fc_flag & FC_PT2PT)
+	} else if (vport->fc_flag & FC_PT2PT) {
 		lpfc_disc_start(vport);
+	}
 	return;
 
 out:
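
The BB_SC_N value negotiated here travels in the upper nibble of the common service parameter byte bbRcvSizeMsb, which is why the code keeps masking with 0xf before OR-ing in the shifted value. The same arithmetic written out as standalone helpers (sketch only; names are illustrative):

	/* low nibble: BB receive size MSB, high nibble: BB_SC_N */
	static inline u8 bbscn_pack(u8 bb_rcv_size_msb, u8 bbscn)
	{
		return (bb_rcv_size_msb & 0xf) | ((bbscn & 0xf) << 4);
	}

	static inline u8 bbscn_unpack(u8 bb_rcv_size_msb)
	{
		return (bb_rcv_size_msb >> 4) & 0xf;
	}
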
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 26a5647e057e6de3baf3f48ec25b32161ae2aeb9..bdc1f184f67a3fdfcb6cdbef3ad8fd92cc819084 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2293,15 +2293,27 @@ typedef struct {
 	uint32_t rttov;
 	uint32_t altov;
 	uint32_t crtov;
-	uint32_t citov;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd4:19;
+	uint32_t cscn:1;
+	uint32_t bbscn:4;
+	uint32_t rsvd3:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rsvd3:8;
+	uint32_t bbscn:4;
+	uint32_t cscn:1;
+	uint32_t rsvd4:19;
+#endif
+
 #ifdef __BIG_ENDIAN_BITFIELD
 	uint32_t rrq_enable:1;
 	uint32_t rrq_immed:1;
-	uint32_t rsvd4:29;
+	uint32_t rsvd5:29;
 	uint32_t ack0_enable:1;
 #else	/*  __LITTLE_ENDIAN_BITFIELD */
 	uint32_t ack0_enable:1;
-	uint32_t rsvd4:29;
+	uint32_t rsvd5:29;
 	uint32_t rrq_immed:1;
 	uint32_t rrq_enable:1;
 #endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bb4715705fa342b787567aa77d866116c612be2a..1db0a38683f438d8756cffc30db45a4afd97c3e8 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2217,9 +2217,15 @@ struct lpfc_mbx_reg_vfi {
 	uint32_t e_d_tov;
 	uint32_t r_a_tov;
 	uint32_t word10;
-#define lpfc_reg_vfi_nport_id_SHIFT		0
-#define lpfc_reg_vfi_nport_id_MASK		0x00FFFFFF
-#define lpfc_reg_vfi_nport_id_WORD		word10
+#define lpfc_reg_vfi_nport_id_SHIFT	0
+#define lpfc_reg_vfi_nport_id_MASK	0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD	word10
+#define lpfc_reg_vfi_bbcr_SHIFT		27
+#define lpfc_reg_vfi_bbcr_MASK		0x00000001
+#define lpfc_reg_vfi_bbcr_WORD		word10
+#define lpfc_reg_vfi_bbscn_SHIFT	28
+#define lpfc_reg_vfi_bbscn_MASK		0x0000000F
+#define lpfc_reg_vfi_bbscn_WORD		word10
 };
 
 struct lpfc_mbx_init_vpi {
@@ -2646,7 +2652,16 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_link_speed_MASK	0x0000FFFF
 #define lpfc_mbx_rd_conf_link_speed_WORD	word6
 	uint32_t rsvd_7;
-	uint32_t rsvd_8;
+	uint32_t word8;
+#define lpfc_mbx_rd_conf_bbscn_min_SHIFT	0
+#define lpfc_mbx_rd_conf_bbscn_min_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_min_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_max_SHIFT	4
+#define lpfc_mbx_rd_conf_bbscn_max_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_max_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_def_SHIFT	8
+#define lpfc_mbx_rd_conf_bbscn_def_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_def_WORD		word8
 	uint32_t word9;
 #define lpfc_mbx_rd_conf_lmt_SHIFT		0
 #define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
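
The three new bbscn fields in word8 are consumed through lpfc's bf_get()-style accessors; in plain C the extraction is nothing more than the shift-and-mask below (sketch, equivalent to the SHIFT/MASK/WORD triplets defined above):

	static inline u32 rd_conf_bbscn_min(u32 word8)
	{
		return word8 & 0xf;		/* bits 3:0 */
	}

	static inline u32 rd_conf_bbscn_max(u32 word8)
	{
		return (word8 >> 4) & 0xf;	/* bits 7:4 */
	}

	static inline u32 rd_conf_bbscn_def(u32 word8)
	{
		return (word8 >> 8) & 0xf;	/* bits 11:8 */
	}
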
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 491aa95eb0f6184cff8f0ea620d6c7a81e84a9e1..7e7ae786121b6e8458f7a7aa8954141c77e223a5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -37,6 +37,7 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/bitops.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1253,6 +1254,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	unsigned long time_elapsed;
 	uint32_t tick_cqe, max_cqe, val;
 	uint64_t tot, data1, data2, data3;
+	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_register reg_data;
 	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
@@ -1281,13 +1283,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 		/* Check outstanding IO count */
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 			if (phba->nvmet_support) {
-				spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-				spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-				tot = phba->sli4_hba.nvmet_xri_cnt -
-					(phba->sli4_hba.nvmet_ctx_get_cnt +
-					phba->sli4_hba.nvmet_ctx_put_cnt);
-				spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-				spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+				tgtp = phba->targetport->private;
+				/* Calculate outstanding IOs */
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+				tot += atomic_read(&tgtp->xmt_fcp_release);
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 			} else {
 				tot = atomic_read(&phba->fc4NvmeIoCmpls);
 				data1 = atomic_read(
@@ -3048,7 +3048,7 @@ lpfc_online(struct lpfc_hba *phba)
 {
 	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
-	int i;
+	int i, error = 0;
 	bool vpis_cleared = false;
 
 	if (!phba)
@@ -3072,6 +3072,18 @@ lpfc_online(struct lpfc_hba *phba)
 		if (!phba->sli4_hba.max_cfg_param.vpi_used)
 			vpis_cleared = true;
 		spin_unlock_irq(&phba->hbalock);
+
+		/* Reestablish the local initiator port.
+		 * The offline process destroyed the previous lport.
+		 */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+				!phba->nvmet_support) {
+			error = lpfc_nvme_create_localport(phba->pport);
+			if (error)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6132 NVME restore reg failed "
+					"on nvmei error x%x\n", error);
+		}
 	} else {
 		lpfc_sli_queue_init(phba);
 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
@@ -3226,6 +3238,13 @@ lpfc_offline(struct lpfc_hba *phba)
 
 	/* stop port and all timers associated with this hba */
 	lpfc_stop_port(phba);
+
+	/* Tear down the local and target port registrations.  The
+	 * nvme transports need to cleanup.
+	 */
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(phba->pport);
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
@@ -3275,7 +3294,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3286,7 +3305,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3317,7 +3336,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_put, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3328,7 +3347,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_get, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3640,7 +3659,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&scsi_sgl_list, psb,
 					 struct lpfc_scsi_buf, list);
 			if (psb) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      psb->data, psb->dma_handle);
 				kfree(psb);
 			}
@@ -3710,9 +3729,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		return be64_to_cpu(wwn);
 	else
-		return (((wwn & 0xffffffff00000000) >> 32) |
-			((wwn & 0x00000000ffffffff) << 32));
-
+		return rol64(wwn, 32);
 }
 
 /**
@@ -3774,7 +3791,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
 					 struct lpfc_nvme_buf, list);
 			if (lpfc_ncmd) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      lpfc_ncmd->data,
 					      lpfc_ncmd->dma_handle);
 				kfree(lpfc_ncmd);
@@ -5930,8 +5947,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
 
 		/* Fast-path XRI aborted CQ Event work queue list */
@@ -5940,8 +5955,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
 	/*
@@ -6516,6 +6529,12 @@ lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
 		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
 		kfree(sglq_entry);
 	}
+
+	/* Update the nvmet_xri_cnt to reflect no current sgls.
+	 * The next initialization cycle sets the count and allocates
+	 * the sgls over again.
+	 */
+	phba->sli4_hba.nvmet_xri_cnt = 0;
 }
 
 /**
@@ -6846,8 +6865,8 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	if (phba->nvmet_support) {
 		/* Only 1 vport (pport) will support NVME target */
 		if (phba->txrdy_payload_pool == NULL) {
-			phba->txrdy_payload_pool = pci_pool_create(
-				"txrdy_pool", phba->pcidev,
+			phba->txrdy_payload_pool = dma_pool_create(
+				"txrdy_pool", &phba->pcidev->dev,
 				TXRDY_PAYLOAD_LEN, 16, 0);
 			if (phba->txrdy_payload_pool) {
 				phba->targetport = NULL;
@@ -7605,6 +7624,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 					"3082 Mailbox (x%x) returned ldv:x0\n",
 					bf_get(lpfc_mqe_command, &pmb->u.mqe));
+		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
+			phba->bbcredit_support = 1;
+			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
+		}
+
 		phba->sli4_hba.extents_in_use =
 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
@@ -8301,6 +8325,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 				goto out_error;
 			}
 
+			/* Put list in known state in case driver load fails. */
+			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
+
 			/* Create NVMET Receive Queue for data */
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      phba->sli4_hba.rq_esize,
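
The remaining lpfc hunks are a mechanical pci_pool_* to dma_pool_* rename; the only real signature change is that dma_pool_create() takes a struct device * rather than a struct pci_dev *. A self-contained sketch of the replacement API, with the pool name and sizes purely illustrative:

	#include <linux/pci.h>
	#include <linux/dmapool.h>

	static int example_pool_roundtrip(struct pci_dev *pdev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *buf;

		/* note &pdev->dev, where pci_pool_create() took pdev */
		pool = dma_pool_create("example_pool", &pdev->dev, 512, 16, 0);
		if (!pool)
			return -ENOMEM;

		buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (!buf) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... hand buf/dma to the hardware ... */

		dma_pool_free(pool, buf, dma);
		dma_pool_destroy(pool);
		return 0;
	}
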
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index ce25a18367b590b11c1a1521db559580bb06fa52..81fb92967b113dccb2d11d47caf1229e85bdacb7 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -376,7 +376,12 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 	mb->un.varCfgLnk.rttov = phba->fc_rttov;
 	mb->un.varCfgLnk.altov = phba->fc_altov;
 	mb->un.varCfgLnk.crtov = phba->fc_crtov;
-	mb->un.varCfgLnk.citov = phba->fc_citov;
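+	/* Request BB Credit Recovery only if the adapter and config allow it */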
+	mb->un.varCfgLnk.cscn = 0;
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+		mb->un.varCfgLnk.cscn = 1;
+		mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
+						 &phba->sli4_hba.bbscn_params);
+	}
 
 	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
 		mb->un.varCfgLnk.ack0_enable = 1;
@@ -2139,6 +2144,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 {
 	struct lpfc_mbx_reg_vfi *reg_vfi;
 	struct lpfc_hba *phba = vport->phba;
+	uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
 
 	memset(mbox, 0, sizeof(*mbox));
 	reg_vfi = &mbox->u.mqe.un.reg_vfi;
@@ -2168,16 +2174,39 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
 		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
 	}
+
+	bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
+	bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
+	bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
+
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+	    bbscn_fabric != 0) {
+		bbscn_max = bf_get(lpfc_bbscn_max,
+				   &phba->sli4_hba.bbscn_params);
+		if (bbscn_fabric <= bbscn_max) {
+			bbscn_def = bf_get(lpfc_bbscn_def,
+					   &phba->sli4_hba.bbscn_params);
+
+			if (bbscn_fabric > bbscn_def)
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
+				       bbscn_fabric);
+			else
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
+
+			bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
+		}
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
 			"3134 Register VFI, mydid:x%x, fcfi:%d, "
 			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
-			" port_state:x%x topology chg:%d\n",
+			" port_state:x%x topology chg:%d bbscn_fabric :%d\n",
 			vport->fc_myDID,
 			phba->fcf.fcfi,
 			phba->sli4_hba.vfi_ids[vport->vfi],
 			phba->vpi_ids[vport->vpi],
 			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
-			vport->port_state, phba->fc_topology_changed);
+			vport->port_state, phba->fc_topology_changed,
+			bbscn_fabric);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index fcc05a1517c21d5134282e6cc9337ade5ee1a5c9..56faeb049b4ac50ec04783349fcea9b79c20583f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -97,8 +97,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 			i = SLI4_PAGE_SIZE;
 
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev,
 					phba->cfg_sg_dma_buf_size,
 					i, 0);
 		if (!phba->lpfc_sg_dma_buf_pool)
@@ -106,15 +106,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	} else {
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev, phba->cfg_sg_dma_buf_size,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
 					align, 0);
 
 		if (!phba->lpfc_sg_dma_buf_pool)
 			goto fail;
 	}
 
-	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
 							LPFC_BPL_SIZE,
 							align, 0);
 	if (!phba->lpfc_mbuf_pool)
@@ -128,7 +128,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	pool->max_count = 0;
 	pool->current_count = 0;
 	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
-		pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
 				       GFP_KERNEL, &pool->elements[i].phys);
 		if (!pool->elements[i].virt)
 			goto fail_free_mbuf_pool;
@@ -152,21 +152,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 						sizeof(struct lpfc_node_rrq));
 		if (!phba->rrq_pool)
 			goto fail_free_nlp_mem_pool;
-		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
-					      phba->pcidev,
+		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
 		if (!phba->lpfc_hrb_pool)
 			goto fail_free_rrq_mem_pool;
 
-		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
-					      phba->pcidev,
+		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_DATA_BUF_SIZE, align, 0);
 		if (!phba->lpfc_drb_pool)
 			goto fail_free_hrb_pool;
 		phba->lpfc_hbq_pool = NULL;
 	} else {
-		phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
-			phba->pcidev, LPFC_BPL_SIZE, align, 0);
+		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
 		if (!phba->lpfc_hbq_pool)
 			goto fail_free_nlp_mem_pool;
 		phba->lpfc_hrb_pool = NULL;
@@ -185,10 +185,10 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	return 0;
 fail_free_drb_pool:
-	pci_pool_destroy(phba->lpfc_drb_pool);
+	dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
  fail_free_hrb_pool:
-	pci_pool_destroy(phba->lpfc_hrb_pool);
+	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
  fail_free_rrq_mem_pool:
 	mempool_destroy(phba->rrq_pool);
@@ -201,14 +201,14 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	phba->mbox_mem_pool = NULL;
  fail_free_mbuf_pool:
 	while (i--)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 						 pool->elements[i].phys);
 	kfree(pool->elements);
  fail_free_lpfc_mbuf_pool:
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
  fail_free_dma_buf_pool:
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
  fail:
 	return -ENOMEM;
@@ -218,8 +218,8 @@ int
 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
 {
 	phba->lpfc_nvmet_drb_pool =
-		pci_pool_create("lpfc_nvmet_drb_pool",
-				phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+		dma_pool_create("lpfc_nvmet_drb_pool",
+				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
 				SGL_ALIGN_SZ, 0);
 	if (!phba->lpfc_nvmet_drb_pool) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -248,20 +248,20 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
 	if (phba->lpfc_nvmet_drb_pool)
-		pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+		dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
 	phba->lpfc_nvmet_drb_pool = NULL;
 	if (phba->lpfc_drb_pool)
-		pci_pool_destroy(phba->lpfc_drb_pool);
+		dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
 	if (phba->lpfc_hrb_pool)
-		pci_pool_destroy(phba->lpfc_hrb_pool);
+		dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
 	if (phba->txrdy_payload_pool)
-		pci_pool_destroy(phba->txrdy_payload_pool);
+		dma_pool_destroy(phba->txrdy_payload_pool);
 	phba->txrdy_payload_pool = NULL;
 
 	if (phba->lpfc_hbq_pool)
-		pci_pool_destroy(phba->lpfc_hbq_pool);
+		dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
 	if (phba->rrq_pool)
@@ -282,15 +282,15 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
 	/* Free MBUF memory pool */
 	for (i = 0; i < pool->current_count; i++)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 			      pool->elements[i].phys);
 	kfree(pool->elements);
 
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 
 	/* Free DMA buffer memory pool */
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
 
 	/* Free Device Data memory pool */
@@ -379,7 +379,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
  * @handle: used to return the DMA-mapped address of the mbuf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
- * Allocates from generic pci_pool_alloc function first and if that fails and
+ * Allocates from generic dma_pool_alloc function first and if that fails and
  * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  * HBA's pool.
  *
@@ -397,7 +397,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 	unsigned long iflags;
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
@@ -433,7 +433,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
 		pool->elements[pool->current_count].phys = dma;
 		pool->current_count++;
 	} else {
-		pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
 	}
 	return;
 }
@@ -470,7 +470,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  * @handle: used to return the DMA-mapped address of the nvmet_buf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
- * PCI pool.  Allocates from generic pci_pool_alloc function.
+ * DMA pool.  Allocates from the generic dma_pool_alloc function.
  *
  * Returns:
  *   pointer to the allocated nvmet_buf on success
@@ -481,7 +481,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 {
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
 	return ret;
 }
 
@@ -497,7 +497,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 void
 lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
 {
-	pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
 }
 
 /**
@@ -522,7 +522,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 	if (!hbqbp)
 		return NULL;
 
-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -547,7 +547,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
@@ -574,16 +574,16 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -607,8 +607,8 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
 
@@ -634,16 +634,16 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
 					    GFP_KERNEL, &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -667,8 +667,8 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_nvmet_drb_pool,
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f74cb0142fd4edac4e1378db1e85d14ec21c71a8..f3ad7cac355d3e96ffe99da8635dd643132ba5e8 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1724,6 +1724,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 				lpfc_nvme_update_localport(vport);
 			}
 
+		} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
 		} else if (ndlp->nlp_fc4_type == 0) {
 			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
 					 0, ndlp->nlp_DID);
@@ -1892,6 +1895,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			goto out;
 		}
 
+		/* Check if the rport rejected the FCP PRLI as unsupported.
+		 * This should only happen in Pt2Pt, so an NVME PRLI
+		 * should still be outstanding.
+		 */
+		if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
+			goto out_err;
+		}
+
 		/* The LS Req had some error.  Don't let this be a
 		 * target.
 		 */
@@ -2189,12 +2201,15 @@ lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			  void *arg, uint32_t evt)
 {
 	/*
-	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
-	 * timed out and is calling for Device Remove.  In this case, the LOGO
-	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
-	 * and other NLP flags are correctly cleaned up.
+	 * DevLoss has timed out and is calling for Device Remove.
+	 * In this case, abort the LOGO and clean up the ndlp.
 	 */
-	return ndlp->nlp_state;
+
+	lpfc_unreg_rpi(vport, ndlp);
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(vport->phba, ndlp);
+	lpfc_drop_node(vport, ndlp);
+	return NLP_STE_FREED_NODE;
 }
 
 static uint32_t
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0a0a1b92d01d6a08d3baa04d5e47cd08ccd2494e..79ba3ce063a4f847ce8187d3290ecf5a309f62b9 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -110,7 +110,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 		qhandle->index = qidx;
 	}
 
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
 			 "io_channel %d qhandle %p\n", str,
 			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
@@ -364,7 +364,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
 
 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
-	if (rc == WQE_ERROR) {
+	if (rc) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "6045 Issue GEN REQ WQE to NPORT x%x "
 				 "Data: x%x x%x\n",
@@ -1270,7 +1270,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	 * not exceed the programmed depth.
 	 */
 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
-		ret = -EAGAIN;
+		ret = -EBUSY;
 		goto out_fail;
 	}
 
@@ -1279,7 +1279,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6065 driver's buffer pool is empty, "
 				 "IO failed\n");
-		ret = -ENOMEM;
+		ret = -EBUSY;
 		goto out_fail;
 	}
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1332,7 +1332,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 				 "sid: x%x did: x%x oxid: x%x\n",
 				 ret, vport->fc_myDID, ndlp->nlp_DID,
 				 lpfc_ncmd->cur_iocbq.sli4_xritag);
-		ret = -EBUSY;
 		goto out_free_nvme_buf;
 	}
 
@@ -1576,7 +1575,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
 	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
-	if (ret_val == IOCB_ERROR) {
+	if (ret_val) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6137 Failed abts issue_wqe with status x%x "
 				 "for nvme_fcreq %p.\n",
@@ -1939,7 +1938,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		 * pci bus space for an I/O. The DMA buffer includes the
 		 * number of SGE's necessary to support the sg_tablesize.
 		 */
-		lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
 						 GFP_KERNEL,
 						 &lpfc_ncmd->dma_handle);
 		if (!lpfc_ncmd->data) {
@@ -1950,7 +1949,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			break;
@@ -1961,7 +1960,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, pwqeq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -2182,8 +2181,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		vport->localport = localport;
 		lport->vport = vport;
 		vport->nvmei_support = 1;
-		len  = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
-		vport->phba->total_nvme_bufs += len;
+
+		/* Don't post more new bufs if repost already recovered
+		 * the nvme sgls.
+		 */
+		if (phba->sli4_hba.nvme_xri_cnt == 0) {
+			len  = lpfc_new_nvme_buf(vport,
+						 phba->sli4_hba.nvme_xri_max);
+			vport->phba->total_nvme_bufs += len;
+		}
 	}
 
 	return ret;
@@ -2296,6 +2302,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 ndlp->nlp_DID, ndlp->nlp_type);
 
 	localport = vport->localport;
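+	/* Nothing to register against if no localport was created */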
+	if (!localport)
+		return 0;
+
 	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* NVME rports are not preserved across devloss.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index bbbd0f84160d36563008a212afd8252f86ef15c8..0b7c1a49e203f8f31024d8756474084f8cacb529 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -170,12 +170,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct rqb_dmabuf *nvmebuf;
+	struct lpfc_nvmet_ctx_info *infop;
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc;
+	int cpu;
 	unsigned long iflag;
 
 	if (ctxp->txrdy) {
-		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
 			      ctxp->txrdy_phys);
 		ctxp->txrdy = NULL;
 		ctxp->txrdy_phys = 0;
@@ -267,11 +269,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
-	list_add_tail(&ctx_buf->list,
-		      &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-	phba->sli4_hba.nvmet_ctx_put_cnt++;
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
+	/*
+	 * Use the current CPU's context list for the MRQ the IO was
+	 * received on (ctxp->idx) to save the context structure.
+	 */
+	cpu = smp_processor_id();
+	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
+	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+	infop->nvmet_ctx_list_cnt++;
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
 #endif
 }
 
@@ -552,7 +559,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
 	} else {
 		ctxp->entry_cnt++;
-		start_clean = offsetof(struct lpfc_iocbq, wqe);
+		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
 		memset(((char *)cmdwqe) + start_clean, 0,
 		       (sizeof(struct lpfc_iocbq) - start_clean));
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -879,51 +886,54 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
 };
 
 static void
-lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
+		struct lpfc_nvmet_ctx_info *infop)
 {
 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
-	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-			&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+				&infop->nvmet_ctx_list, list) {
 		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		list_del_init(&ctx_buf->list);
 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		__lpfc_clear_active_sglq(phba,
-					 ctx_buf->sglq->sli4_lxritag);
+
+		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
 
 		spin_lock(&phba->sli4_hba.sgl_list_lock);
 		list_add_tail(&ctx_buf->sglq->list,
-			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
+				&phba->sli4_hba.lpfc_nvmet_sgl_list);
 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 
 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 		kfree(ctx_buf->context);
 	}
-	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-			&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		list_del_init(&ctx_buf->list);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		__lpfc_clear_active_sglq(phba,
-					 ctx_buf->sglq->sli4_lxritag);
-		ctx_buf->sglq->state = SGL_FREED;
-		ctx_buf->sglq->ndlp = NULL;
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
+}
 
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
-		list_add_tail(&ctx_buf->sglq->list,
-			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+static void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j;
 
-		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
-		kfree(ctx_buf->context);
+	/* The first context list, MRQ 0 CPU 0 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	if (!infop)
+		return;
+
+	/* Cycle through the entire CPU context list for every MRQ */
+	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
+			infop++; /* next */
+		}
 	}
-	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+	kfree(phba->sli4_hba.nvmet_ctx_info);
+	phba->sli4_hba.nvmet_ctx_info = NULL;
 }
 
 static int
@@ -932,15 +942,71 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
 	struct lpfc_iocbq *nvmewqe;
 	union lpfc_wqe128 *wqe;
-	int i;
+	struct lpfc_nvmet_ctx_info *last_infop;
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j, idx;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);
 
+	phba->sli4_hba.nvmet_ctx_info = kcalloc(
+		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
+	if (!phba->sli4_hba.nvmet_ctx_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6419 Failed allocate memory for "
+				"nvmet context lists\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Assuming X CPUs in the system, and Y MRQs, allocate some
+	 * lpfc_nvmet_ctx_info structures as follows:
+	 *
+	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
+	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
+	 * ...
+	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
+	 *
+	 * Each line represents an MRQ "silo" containing an entry for
+	 * every CPU.
+	 *
+	 * MRQ X is initially assumed to be associated with CPU X, thus
+	 * contexts are initially distributed across all MRQs using
+	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
+	 * freed, they are freed to the MRQ silo based on the CPU number
+	 * of the IO completion. Thus a context that was allocated for MRQ A
+	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
+	 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
+			spin_lock_init(&infop->nvmet_ctx_list_lock);
+			infop->nvmet_ctx_list_cnt = 0;
+			infop++;
+		}
+	}
+
+	/*
+	 * Setup the next CPU context info ptr for each MRQ.
+	 * MRQ 0 will cycle thru CPUs 0 - X separately from
+	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
+	 */
+	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+		last_infop = lpfc_get_ctx_list(phba, 0, j);
+		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+			infop = lpfc_get_ctx_list(phba, i, j);
+			infop->nvmet_ctx_next_cpu = last_infop;
+			last_infop = infop;
+		}
+	}
+
 	/* For all nvmet xris, allocate resources needed to process a
 	 * received command on a per xri basis.
 	 */
+	idx = 0;
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
@@ -977,7 +1043,6 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 		/* Word 7 */
 		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
 		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
 		/* Word 10 */
 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
 		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
@@ -995,12 +1060,35 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 					"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
-		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-		list_add_tail(&ctx_buf->list,
-			      &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
+		/*
+		 * Add ctx to MRQidx context list. Our initial assumption
+		 * is MRQidx will be associated with CPUidx. This association
+		 * can change on the fly.
+		 */
+		infop = lpfc_get_ctx_list(phba, idx, idx);
+		spin_lock(&infop->nvmet_ctx_list_lock);
+		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+		infop->nvmet_ctx_list_cnt++;
+		spin_unlock(&infop->nvmet_ctx_list_lock);
+
+		/* Spread ctx structures evenly across all MRQs */
+		idx++;
+		if (idx >= phba->cfg_nvmet_mrq)
+			idx = 0;
+	}
+
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+					"6408 TOTAL NVMET ctx for CPU %d "
+					"MRQ %d: cnt %d nextcpu %p\n",
+					i, j, infop->nvmet_ctx_list_cnt,
+					infop->nvmet_ctx_next_cpu);
+			infop++;
+		}
 	}
-	phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
 	return 0;
 }
 
@@ -1365,10 +1453,65 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 #endif
 }
 
+static struct lpfc_nvmet_ctxbuf *
+lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
+			     struct lpfc_nvmet_ctx_info *current_infop)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
+	struct lpfc_nvmet_ctx_info *get_infop;
+	int i;
+
+	/*
+	 * The current_infop for the MRQ an NVME command IU was received
+	 * on is empty. Our goal is to replenish this MRQ's context
+	 * list from another CPU's list.
+	 *
+	 * First we need to pick a context list to start looking on.
+	 * nvmet_ctx_start_cpu is the CPU that had contexts available
+	 * the last time we needed to replenish this MRQ, while
+	 * nvmet_ctx_next_cpu is simply the next sequential CPU for
+	 * this MRQ.
+	 */
+	if (current_infop->nvmet_ctx_start_cpu)
+		get_infop = current_infop->nvmet_ctx_start_cpu;
+	else
+		get_infop = current_infop->nvmet_ctx_next_cpu;
+
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		if (get_infop == current_infop) {
+			get_infop = get_infop->nvmet_ctx_next_cpu;
+			continue;
+		}
+		spin_lock(&get_infop->nvmet_ctx_list_lock);
+
+		/* Just take the entire context list, if there are any */
+		if (get_infop->nvmet_ctx_list_cnt) {
+			list_splice_init(&get_infop->nvmet_ctx_list,
+				    &current_infop->nvmet_ctx_list);
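+			/* One ctx_buf is removed and returned below, hence the -1 */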
+			current_infop->nvmet_ctx_list_cnt =
+				get_infop->nvmet_ctx_list_cnt - 1;
+			get_infop->nvmet_ctx_list_cnt = 0;
+			spin_unlock(&get_infop->nvmet_ctx_list_lock);
+
+			current_infop->nvmet_ctx_start_cpu = get_infop;
+			list_remove_head(&current_infop->nvmet_ctx_list,
+					 ctx_buf, struct lpfc_nvmet_ctxbuf,
+					 list);
+			return ctx_buf;
+		}
+
+		/* Otherwise, move on to the next CPU for this MRQ */
+		spin_unlock(&get_infop->nvmet_ctx_list_lock);
+		get_infop = get_infop->nvmet_ctx_next_cpu;
+	}
+
+	/* Nothing found, all contexts for the MRQ are in-flight */
+	return NULL;
+}
+
 /**
  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
  * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
  *
  * This routine is used for processing the WQE associated with a unsolicited
@@ -1380,22 +1523,26 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  **/
 static void
 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
-			    struct lpfc_sli_ring *pring,
+			    uint32_t idx,
 			    struct rqb_dmabuf *nvmebuf,
 			    uint64_t isr_timestamp)
 {
-#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
+	struct lpfc_nvmet_ctx_info *current_infop;
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc, qno;
 	unsigned long iflag;
+	int current_cpu;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t id;
 #endif
 
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
 	ctx_buf = NULL;
 	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1407,31 +1554,24 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		goto dropit;
 	}
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
-	if (phba->sli4_hba.nvmet_ctx_get_cnt) {
-		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+	/*
+	 * Get a pointer to the context list for this MRQ based on
+	 * the CPU this MRQ IRQ is associated with. If the CPU association
+	 * changes from our initial assumption, the context list could
+	 * be empty, thus it would need to be replenished with the
+	 * context list from another CPU for this MRQ.
+	 */
+	current_cpu = smp_processor_id();
+	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
+	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
+	if (current_infop->nvmet_ctx_list_cnt) {
+		list_remove_head(&current_infop->nvmet_ctx_list,
 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-		phba->sli4_hba.nvmet_ctx_get_cnt--;
+		current_infop->nvmet_ctx_list_cnt--;
 	} else {
-		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		if (phba->sli4_hba.nvmet_ctx_put_cnt) {
-			list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
-				    &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-			INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-			phba->sli4_hba.nvmet_ctx_get_cnt =
-				phba->sli4_hba.nvmet_ctx_put_cnt;
-			phba->sli4_hba.nvmet_ctx_put_cnt = 0;
-			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-
-			list_remove_head(
-				&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
-				ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-			phba->sli4_hba.nvmet_ctx_get_cnt--;
-		} else {
-			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-		}
+		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
 
 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -1483,6 +1623,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	ctxp->size = size;
 	ctxp->oxid = oxid;
 	ctxp->sid = sid;
+	ctxp->idx = idx;
 	ctxp->state = LPFC_NVMET_STE_RCV;
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
@@ -1556,7 +1697,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
 	if (nvmebuf)
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-#endif
 }
 
 /**
@@ -1591,7 +1731,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 /**
  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
  * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to received nvme data structure.
  *
  * This routine is used to process an unsolicited event received from a SLI
@@ -1602,7 +1742,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  **/
 void
 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
-			   struct lpfc_sli_ring *pring,
+			   uint32_t idx,
 			   struct rqb_dmabuf *nvmebuf,
 			   uint64_t isr_timestamp)
 {
@@ -1610,7 +1750,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
 	}
-	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
+	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
 				    isr_timestamp);
 }
 
@@ -1863,6 +2003,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
 		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
 
 		/* Word 8 */
@@ -1939,7 +2080,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
 	case NVMET_FCOP_WRITEDATA:
 		/* Words 0 - 2 : The first sg segment */
-		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
 				       GFP_KERNEL, &physaddr);
 		if (!txrdy) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1971,6 +2112,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
 		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
 		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
 		       CMD_FCP_TRECEIVE64_WQE);
@@ -2054,6 +2196,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       nvmewqe->sli4_xritag);
 
 		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
 		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
 		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
 
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 48a76788b003cb746afa45376272af362c4446ce..25a65b0bb7f33a8f764c8d0ac816fa98fcd85434 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -74,6 +74,19 @@ struct lpfc_nvmet_tgtport {
 	atomic_t xmt_abort_rsp_error;
 };
 
+struct lpfc_nvmet_ctx_info {
+	struct list_head nvmet_ctx_list;
+	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+	uint16_t	nvmet_ctx_list_cnt;
+	char pad[16];  /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq)  \
+	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
+
 struct lpfc_nvmet_rcv_ctx {
 	union {
 		struct nvmefc_tgt_ls_req ls_req;
@@ -92,6 +105,7 @@ struct lpfc_nvmet_rcv_ctx {
 	uint16_t size;
 	uint16_t entry_cnt;
 	uint16_t cpu;
+	uint16_t idx;
 	uint16_t state;
 	/* States */
 #define LPFC_NVMET_STE_LS_RCV		1
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index adc7845390611f8ec03b85e43a4a264ea0d3643d..1a6f122bb25db6697f7a5e705fdba992cf2bd9fd 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -416,7 +416,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 		 * necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 					GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -427,7 +427,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -826,7 +826,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
 		 * of bde's necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -839,7 +839,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 */
 		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
 		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -848,7 +848,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -857,7 +857,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e948ea05fd3387ddabaa0b0bc02b66337c20b3e9..8b119f87b51d67382b89aa60500d9ffe6f18ab19 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_cqe *);
 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
 				       int);
-static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
-			uint32_t);
+static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+				    struct lpfc_eqe *eqe, uint32_t qidx);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
 static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -106,7 +106,7 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
  * -ENOMEM.
  * The caller is expected to hold the hbalock when calling this routine.
  **/
-static uint32_t
+static int
 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 {
 	union lpfc_wqe *temp_wqe;
@@ -123,7 +123,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	idx = ((q->host_index + 1) % q->entry_count);
 	if (idx == q->hba_index) {
 		q->WQ_overflow++;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 	q->WQ_posted++;
 	/* set consumption flag every once in a while */
@@ -10741,7 +10741,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	abtsiocbp->vport = vport;
 	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
 	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
-	if (retval == IOCB_ERROR) {
+	if (retval) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
 				 "6147 Failed abts issue_wqe with status x%x "
 				 "for oxid x%x\n",
@@ -13010,7 +13010,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * completion queue, and then return.
  *
  **/
-static void
+static int
 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	struct lpfc_queue *speq)
 {
@@ -13034,7 +13034,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0365 Slow-path CQ identifier "
 					"(%d) does not exist\n", cqid);
-		return;
+		return 0;
 	}
 
 	/* Save EQ associated with this CQ */
@@ -13071,7 +13071,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0370 Invalid completion queue type (%d)\n",
 				cq->type);
-		return;
+		return 0;
 	}
 
 	/* Catch the no cq entry condition, log an error */
@@ -13086,6 +13086,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
 		lpfc_worker_wake_up(phba);
+
+	return ecount;
 }
 
 /**
@@ -13289,7 +13291,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
 			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
 			lpfc_nvmet_unsol_fcp_event(
-				phba, phba->sli4_hba.els_wq->pring, dma_buf,
+				phba, idx, dma_buf,
 				cq->assoc_qp->isr_timestamp);
 			return false;
 		}
@@ -13393,7 +13395,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * queue and process all the entries on the completion queue, rearm the
  * completion queue, and then return.
  **/
-static void
+static int
 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 			uint32_t qidx)
 {
@@ -13409,7 +13411,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 				"event: majorcode=x%x, minorcode=x%x\n",
 				bf_get_le32(lpfc_eqe_major_code, eqe),
 				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
+		return 0;
 	}
 
 	/* Get the reference to the corresponding CQ */
@@ -13446,8 +13448,9 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 
 	/* Otherwise this is a Slow path event */
 	if (cq == NULL) {
-		lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
-		return;
+		ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
+						 phba->sli4_hba.hba_eq[qidx]);
+		return ecount;
 	}
 
 process_cq:
@@ -13456,7 +13459,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 				"0368 Miss-matched fast-path completion "
 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
 				cqid, cq->queue_id);
-		return;
+		return 0;
 	}
 
 	/* Save EQ associated with this CQ */
@@ -13486,6 +13489,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
 		lpfc_worker_wake_up(phba);
+
+	return ecount;
 }
 
 static void
@@ -13706,6 +13711,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 	struct lpfc_eqe *eqe;
 	unsigned long iflag;
 	int ecount = 0;
+	int ccount = 0;
 	int hba_eqidx;
 
 	/* Get the driver's phba structure from the dev_id */
@@ -13757,8 +13763,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 		if (eqe == NULL)
 			break;
 
-		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
-		if (!(++ecount % fpeq->entry_repost))
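+		/* Cap the CQEs handled in a single ISR pass at LPFC_MAX_ISR_CQE */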
+		ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+		if (!(++ecount % fpeq->entry_repost) ||
+		    ccount > LPFC_MAX_ISR_CQE)
 			break;
 		fpeq->EQ_processed++;
 	}
@@ -17051,7 +17058,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2;
 
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, cmdiocb);
 }
@@ -17079,7 +17086,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 	/* Allocate buffer for command payload */
 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 	if (pcmd)
-		pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &pcmd->phys);
 	if (!pcmd || !pcmd->virt)
 		goto exit;
@@ -17128,7 +17135,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"2023 Unable to process MDS loopback frame\n");
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, iocbq);
 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -18888,6 +18895,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 	struct lpfc_sglq *sglq;
 	struct lpfc_sli_ring *pring;
 	unsigned long iflags;
+	uint32_t ret = 0;
 
 	/* NVME_LS and NVME_LS ABTS requests. */
 	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
@@ -18906,10 +18914,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		}
 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
 		       pwqe->sli4_xritag);
-		if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
+		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
+
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 		return 0;
@@ -18924,9 +18934,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
-		if (lpfc_sli4_wq_put(wq, wqe)) {
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -18950,9 +18961,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
-		if (lpfc_sli4_wq_put(wq, wqe)) {
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
-			return WQE_ERROR;
+			return ret;
 		}
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7a1d74e9e877057397921b644adbd5413e8419c3..60200385fe009bfff2ca3f03af6a2607a6b027f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,6 +158,7 @@ struct lpfc_queue {
 #define LPFC_MQ_REPOST		8
 #define LPFC_CQ_REPOST		64
 #define LPFC_RQ_REPOST		64
+#define LPFC_MAX_ISR_CQE	64
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL	32  /* For WQs */
 	uint32_t queue_id;	/* Queue ID assigned by the hardware */
 	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
@@ -419,6 +420,20 @@ struct lpfc_hba_eq_hdl {
 #define LPFC_MULTI_CPU_AFFINITY 0xffffffff
 };
 
+/*BB Credit recovery value*/
+struct lpfc_bbscn_params {
+	uint32_t word0;
+#define lpfc_bbscn_min_SHIFT		0
+#define lpfc_bbscn_min_MASK		0x0000000F
+#define lpfc_bbscn_min_WORD		word0
+#define lpfc_bbscn_max_SHIFT		4
+#define lpfc_bbscn_max_MASK		0x0000000F
+#define lpfc_bbscn_max_WORD		word0
+#define lpfc_bbscn_def_SHIFT		8
+#define lpfc_bbscn_def_MASK		0x0000000F
+#define lpfc_bbscn_def_WORD		word0
+};
+
 /* Port Capabilities for SLI4 Parameters */
 struct lpfc_pc_sli4_params {
 	uint32_t supported;
@@ -550,6 +565,7 @@ struct lpfc_sli4_hba {
 	uint32_t ue_to_rp;
 	struct lpfc_register sli_intf;
 	struct lpfc_pc_sli4_params pc_sli4_params;
+	struct lpfc_bbscn_params bbscn_params;
 	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
 
 	/* Pointers to the constructed SLI4 queues */
@@ -621,8 +637,6 @@ struct lpfc_sli4_hba {
 	uint16_t scsi_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
-	uint16_t nvmet_ctx_get_cnt;
-	uint16_t nvmet_ctx_put_cnt;
 	uint16_t nvmet_io_wait_cnt;
 	uint16_t nvmet_io_wait_total;
 	struct list_head lpfc_els_sgl_list;
@@ -631,9 +645,8 @@ struct lpfc_sli4_hba {
 	struct list_head lpfc_abts_nvmet_ctx_list;
 	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_abts_nvme_buf_list;
-	struct list_head lpfc_nvmet_ctx_get_list;
-	struct list_head lpfc_nvmet_ctx_put_list;
 	struct list_head lpfc_nvmet_io_wait_list;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
 	struct lpfc_sglq **lpfc_sglq_active_list;
 	struct list_head lpfc_rpi_hdr_list;
 	unsigned long *rpi_bmask;
@@ -664,8 +677,6 @@ struct lpfc_sli4_hba {
 	spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t sgl_list_lock; /* list of aborted els IOs */
-	spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
-	spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
 	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 	uint32_t physical_port;
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6a24c3e2d5e20666a291d327fda4e78c989576b..6aa192b3e4bff8c90898ab6985518ff96b6651de 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.4.0.1"
+#define LPFC_DRIVER_VERSION "11.4.0.3"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index a6682c508c4cbb800a19fd8b010a551f404d55b3..8c4d3003b68b2ed087b73ec41f036105b367f754 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -448,15 +448,14 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 		ioremap(macio_resource_start(mdev, 1), 0x1000);
 	state->dmaintr = macio_irq(mdev, 1);
 	if (state->regs == NULL || state->dma == NULL) {
-		printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
-		       node->full_name);
+		printk(KERN_ERR "mac53c94: ioremap failed for %pOF\n", node);
 		goto out_free;
 	}
 
 	clkprop = of_get_property(node, "clock-frequency", &proplen);
        	if (clkprop == NULL || proplen != sizeof(int)) {
-       		printk(KERN_ERR "%s: can't get clock frequency, "
-       		       "assuming 25MHz\n", node->full_name);
+       		printk(KERN_ERR "%pOF: can't get clock frequency, "
+       		       "assuming 25MHz\n", node);
        		state->clk_freq = 25000000;
        	} else
        		state->clk_freq = *(int *)clkprop;
@@ -469,7 +468,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
        				sizeof(struct dbdma_cmd), GFP_KERNEL);
        	if (dma_cmd_space == 0) {
        		printk(KERN_ERR "mac53c94: couldn't allocate dma "
-       		       "command space for %s\n", node->full_name);
+       		       "command space for %pOF\n", node);
 		rc = -ENOMEM;
        		goto out_free;
        	}
@@ -481,8 +480,8 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 	mac53c94_init(state);
 
 	if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
-		printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
-		       state->intr, node->full_name);
+		printk(KERN_ERR "mac53C94: can't get irq %d for %pOF\n",
+		       state->intr, node);
 		goto out_free_dma;
 	}
 
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index cdb61eaa2d1f824f8d2082b0a0cb3942c9cab7f0..eb551f3cc471ded0fe22118c35a6e922349f7f28 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -348,26 +348,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 				 u32 dma_count, int write, u8 cmd)
 {
 	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-	u8 *fifo = esp->regs + ESP_FDATA * 16;
+	u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
 	cmd &= ~ESP_CMD_DMA;
 	mep->error = 0;
 
 	if (write) {
+		u8 *dst = (u8 *)addr;
+		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
 		scsi_esp_cmd(esp, cmd);
 
 		while (1) {
-			unsigned int n;
-
-			n = mac_esp_wait_for_fifo(esp);
-			if (!n)
+			if (!mac_esp_wait_for_fifo(esp))
 				break;
 
-			if (n > esp_count)
-				n = esp_count;
-			esp_count -= n;
-
-			MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+			*dst++ = esp_read8(ESP_FDATA);
+			--esp_count;
 
 			if (!esp_count)
 				break;
@@ -375,14 +373,17 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 			if (mac_esp_wait_for_intr(esp))
 				break;
 
-			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
-			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
 				break;
 
 			esp->ireg = esp_read8(ESP_INTRPT);
-			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
-			    ESP_INTR_BSERV)
+			if (esp->ireg & mask) {
+				mep->error = 1;
 				break;
+			}
+
+			if (phase == ESP_MIP)
+				scsi_esp_cmd(esp, ESP_CMD_MOK);
 
 			scsi_esp_cmd(esp, ESP_CMD_TI);
 		}
@@ -402,14 +403,14 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
 			if (mac_esp_wait_for_intr(esp))
 				break;
 
-			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
-			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
 				break;
 
 			esp->ireg = esp_read8(ESP_INTRPT);
-			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
-			    ESP_INTR_BSERV)
+			if (esp->ireg & ~ESP_INTR_BSERV) {
+				mep->error = 1;
 				break;
+			}
 
 			n = MAC_ESP_FIFO_SIZE -
 			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 196acc79714b9da9cffa92c52a1c2c188da59508..dd6057359d7c6e681544b9f7b2be5393b39c7f1a 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -41,7 +41,7 @@
 #define NCR5380_intr                    macscsi_intr
 #define NCR5380_queue_command           macscsi_queue_command
 #define NCR5380_abort                   macscsi_abort
-#define NCR5380_bus_reset               macscsi_bus_reset
+#define NCR5380_host_reset              macscsi_host_reset
 #define NCR5380_info                    macscsi_info
 
 #include "NCR5380.h"
@@ -328,7 +328,7 @@ static struct scsi_host_template mac_scsi_template = {
 	.info			= macscsi_info,
 	.queuecommand		= macscsi_queue_command,
 	.eh_abort_handler	= macscsi_abort,
-	.eh_bus_reset_handler	= macscsi_bus_reset,
+	.eh_host_reset_handler	= macscsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= 1,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3c63c292cb92d5bc92d297ca8bef61ec643d0140..7195cff51d4c4181f60ab3330796818771590798 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -311,13 +311,15 @@ mega_query_adapter(adapter_t *adapter)
 	   right 8 bits making them zero. This 0 value was hardcoded to fix
 	   sparse warnings. */
 	if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
-		sprintf (adapter->fw_version, "%c%d%d.%d%d",
+		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+			 "%c%d%d.%d%d",
 			 adapter->product_info.fw_version[2],
 			 0,
 			 adapter->product_info.fw_version[1] & 0x0f,
 			 0,
 			 adapter->product_info.fw_version[0] & 0x0f);
-		sprintf (adapter->bios_version, "%c%d%d.%d%d",
+		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
+			 "%c%d%d.%d%d",
 			 adapter->product_info.bios_version[2],
 			 0,
 			 adapter->product_info.bios_version[1] & 0x0f,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index f0987f22ea70be2f13cf434120155112fcbcc9dc..ec3c4385497865399c7d4cab92b0c6cd8558a7aa 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -341,8 +341,6 @@ static struct scsi_host_template megaraid_template_g = {
 	.proc_name			= "megaraid",
 	.queuecommand			= megaraid_queue_command,
 	.eh_abort_handler		= megaraid_abort_handler,
-	.eh_device_reset_handler	= megaraid_reset_handler,
-	.eh_bus_reset_handler		= megaraid_reset_handler,
 	.eh_host_reset_handler		= megaraid_reset_handler,
 	.change_queue_depth		= scsi_change_queue_depth,
 	.use_clustering			= ENABLE_CLUSTERING,
@@ -1153,8 +1151,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 
 	// Allocate memory for 16-bytes aligned mailboxes
-	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
-						adapter->pdev,
+	raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool",
+						&adapter->pdev->dev,
 						sizeof(mbox64_t) + 16,
 						16, 0);
 
@@ -1164,7 +1162,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	mbox_pci_blk = raid_dev->mbox_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		mbox_pci_blk[i].vaddr = pci_pool_alloc(
+		mbox_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->mbox_pool_handle,
 						GFP_KERNEL,
 						&mbox_pci_blk[i].dma_addr);
@@ -1181,8 +1179,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 	 * share common memory pool. Passthru structures piggyback on memory
 	 * allocted to extended passthru since passthru is smaller of the two
 	 */
-	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
-			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
+	raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
+			&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
 
 	if (raid_dev->epthru_pool_handle == NULL) {
 		goto fail_setup_dma_pool;
@@ -1190,7 +1188,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	epthru_pci_blk = raid_dev->epthru_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		epthru_pci_blk[i].vaddr = pci_pool_alloc(
+		epthru_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->epthru_pool_handle,
 						GFP_KERNEL,
 						&epthru_pci_blk[i].dma_addr);
@@ -1202,8 +1200,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	// Allocate memory for each scatter-gather list. Request for 512 bytes
 	// alignment for each sg list
-	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
-					adapter->pdev,
+	raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg",
+					&adapter->pdev->dev,
 					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
 					512, 0);
 
@@ -1213,7 +1211,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
 
 	sg_pci_blk = raid_dev->sg_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
-		sg_pci_blk[i].vaddr = pci_pool_alloc(
+		sg_pci_blk[i].vaddr = dma_pool_alloc(
 						raid_dev->sg_pool_handle,
 						GFP_KERNEL,
 						&sg_pci_blk[i].dma_addr);
@@ -1249,29 +1247,29 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
 
 	sg_pci_blk = raid_dev->sg_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
+		dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
 			sg_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->sg_pool_handle)
-		pci_pool_destroy(raid_dev->sg_pool_handle);
+		dma_pool_destroy(raid_dev->sg_pool_handle);
 
 
 	epthru_pci_blk = raid_dev->epthru_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->epthru_pool_handle,
+		dma_pool_free(raid_dev->epthru_pool_handle,
 			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->epthru_pool_handle)
-		pci_pool_destroy(raid_dev->epthru_pool_handle);
+		dma_pool_destroy(raid_dev->epthru_pool_handle);
 
 
 	mbox_pci_blk = raid_dev->mbox_pool;
 	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
-		pci_pool_free(raid_dev->mbox_pool_handle,
+		dma_pool_free(raid_dev->mbox_pool_handle,
 			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
 	}
 	if (raid_dev->mbox_pool_handle)
-		pci_pool_destroy(raid_dev->mbox_pool_handle);
+		dma_pool_destroy(raid_dev->mbox_pool_handle);
 
 	return;
 }
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 544d6f7e6138e203332c792c4e98ac4532f17f64..65b6f6ace3a5334fc3c81b9480871abe3fe2ad04 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
 
 	kioc->pool_index	= right_pool;
 	kioc->free_buf		= 1;
-	kioc->buf_vaddr		= pci_pool_alloc(pool->handle, GFP_ATOMIC,
+	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
 							&kioc->buf_paddr);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
@@ -658,7 +658,7 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
 		 * not in use
 		 */
 		if (kioc->free_buf == 1)
-			pci_pool_free(pool->handle, kioc->buf_vaddr, 
+			dma_pool_free(pool->handle, kioc->buf_vaddr, 
 							kioc->buf_paddr);
 		else
 			pool->in_use = 0;
@@ -940,8 +940,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 						GFP_KERNEL);
 	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
 						GFP_KERNEL);
-	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
-						adapter->pdev,
+	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
+						&adapter->pdev->dev,
 						sizeof(mraid_passthru_t),
 						16, 0);
 
@@ -970,7 +970,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 
 		kioc		= adapter->kioc_list + i;
 		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
-		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
+		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
 						GFP_KERNEL, &kioc->pthru32_h);
 
 		if (!kioc->pthru32) {
@@ -1006,7 +1006,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 	for (i = 0; i < lld_adp->max_kioc; i++) {
 		kioc = adapter->kioc_list + i;
 		if (kioc->pthru32) {
-			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
+			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
 				kioc->pthru32_h);
 		}
 	}
@@ -1017,7 +1017,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
 	kfree(adapter->mbox_list);
 
 	if (adapter->pthru_dma_pool)
-		pci_pool_destroy(adapter->pthru_dma_pool);
+		dma_pool_destroy(adapter->pthru_dma_pool);
 
 	kfree(adapter);
 
@@ -1086,14 +1086,15 @@ mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
 		pool->buf_size = bufsize;
 		spin_lock_init(&pool->lock);
 
-		pool->handle = pci_pool_create("megaraid mm data buffer",
-						adp->pdev, bufsize, 16, 0);
+		pool->handle = dma_pool_create("megaraid mm data buffer",
+						&adp->pdev->dev, bufsize,
+						16, 0);
 
 		if (!pool->handle) {
 			goto dma_pool_setup_error;
 		}
 
-		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
 							&pool->paddr);
 
 		if (!pool->vaddr)
@@ -1163,14 +1164,14 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
 
 		kioc = adp->kioc_list + i;
 
-		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
+		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
 				kioc->pthru32_h);
 	}
 
 	kfree(adp->kioc_list);
 	kfree(adp->mbox_list);
 
-	pci_pool_destroy(adp->pthru_dma_pool);
+	dma_pool_destroy(adp->pthru_dma_pool);
 
 
 	return;
@@ -1194,10 +1195,10 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
 		if (pool->handle) {
 
 			if (pool->vaddr)
-				pci_pool_free(pool->handle, pool->vaddr,
+				dma_pool_free(pool->handle, pool->vaddr,
 							pool->paddr);
 
-			pci_pool_destroy(pool->handle);
+			dma_pool_destroy(pool->handle);
 			pool->handle = NULL;
 		}
 	}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 2b209bbb4c9165fa7afdeff0f233f649684f8495..a6722c93a29524ee594438047178eeea564ea414 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.701.17.00-rc1"
-#define MEGASAS_RELDATE				"March 2, 2017"
+#define MEGASAS_VERSION				"07.702.06.00-rc1"
+#define MEGASAS_RELDATE				"June 21, 2017"
 
 /*
  * Device IDs
@@ -2115,7 +2115,6 @@ struct megasas_instance {
 	u32 *crash_dump_buf;
 	dma_addr_t crash_dump_h;
 	void *crash_buf[MAX_CRASH_DUMP_SIZE];
-	u32 crash_buf_pages;
 	unsigned int    fw_crash_buffer_size;
 	unsigned int    fw_crash_state;
 	unsigned int    fw_crash_buffer_offset;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71c4746341ea379e3881d32cf3078f135d15f999..e518dadc81615fd4e981b9127e840b9c7adc2d2b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -49,6 +49,7 @@
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
 #include <linux/poll.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -1995,9 +1996,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
 				if (cmd_mfi->sync_cmd &&
-					cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+					cmd_mfi->frame->hdr.cmd_status =
+							MFI_STAT_WRONG_STATE;
 					megasas_complete_cmd(instance,
 							     cmd_mfi, DID_OK);
+				}
 			}
 		}
 	} else {
@@ -2791,7 +2795,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
 		if (cmd)
 			megasas_dump_frame(cmd->io_request,
-				sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
 		ret = megasas_reset_fusion(scmd->device->host,
 				SCSIIO_TIMEOUT_OCR);
 	} else
@@ -3862,19 +3866,19 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
 		cmd = instance->cmd_list[i];
 
 		if (cmd->frame)
-			pci_pool_free(instance->frame_dma_pool, cmd->frame,
+			dma_pool_free(instance->frame_dma_pool, cmd->frame,
 				      cmd->frame_phys_addr);
 
 		if (cmd->sense)
-			pci_pool_free(instance->sense_dma_pool, cmd->sense,
+			dma_pool_free(instance->sense_dma_pool, cmd->sense,
 				      cmd->sense_phys_addr);
 	}
 
 	/*
 	 * Now destroy the pool itself
 	 */
-	pci_pool_destroy(instance->frame_dma_pool);
-	pci_pool_destroy(instance->sense_dma_pool);
+	dma_pool_destroy(instance->frame_dma_pool);
+	dma_pool_destroy(instance->sense_dma_pool);
 
 	instance->frame_dma_pool = NULL;
 	instance->sense_dma_pool = NULL;
@@ -3925,22 +3929,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 	/*
 	 * Use DMA pool facility provided by PCI layer
 	 */
-	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
-					instance->pdev, instance->mfi_frame_size,
-					256, 0);
+	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
+					&instance->pdev->dev,
+					instance->mfi_frame_size, 256, 0);
 
 	if (!instance->frame_dma_pool) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
 		return -ENOMEM;
 	}
 
-	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
-						   instance->pdev, 128, 4, 0);
+	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
+						   &instance->pdev->dev, 128,
+						   4, 0);
 
 	if (!instance->sense_dma_pool) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
 
-		pci_pool_destroy(instance->frame_dma_pool);
+		dma_pool_destroy(instance->frame_dma_pool);
 		instance->frame_dma_pool = NULL;
 
 		return -ENOMEM;
@@ -3955,10 +3960,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 
 		cmd = instance->cmd_list[i];
 
-		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
 					    GFP_KERNEL, &cmd->frame_phys_addr);
 
-		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
 					    GFP_KERNEL, &cmd->sense_phys_addr);
 
 		/*
@@ -3966,7 +3971,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 		 * whatever has been allocated
 		 */
 		if (!cmd->frame || !cmd->sense) {
-			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
 			megasas_teardown_frame_pool(instance);
 			return -ENOMEM;
 		}
@@ -5478,7 +5483,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		instance->throttlequeuedepth =
 				MEGASAS_THROTTLE_QUEUE_DEPTH;
 
-	if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+	if ((resetwaittime < 1) ||
+	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
 
 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
@@ -5649,6 +5655,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
 		prev_aen.word =
 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
 
+		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+			dev_info(&instance->pdev->dev,
+				 "%s %d out of range class %d sent by application\n",
+				 __func__, __LINE__, curr_aen.members.class);
+			return 0;
+		}
+
 		/*
 		 * A class whose enum value is smaller is inclusive of all
 		 * higher values. If a PROGRESS (= -1) was previously
@@ -6096,14 +6110,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
 		instance->pd_info = pci_alloc_consistent(pdev,
 			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
 
-		instance->pd_info = pci_alloc_consistent(pdev,
-			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
-		instance->tgt_prop = pci_alloc_consistent(pdev,
-			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
-
 		if (!instance->pd_info)
 			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
 
+		instance->tgt_prop = pci_alloc_consistent(pdev,
+			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+
 		if (!instance->tgt_prop)
 			dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
 
@@ -6663,9 +6675,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
 						  fusion->max_map_sz,
 						  fusion->ld_map[i],
 						  fusion->ld_map_phys[i]);
-			if (fusion->ld_drv_map[i])
-				free_pages((ulong)fusion->ld_drv_map[i],
-					fusion->drv_map_pages);
+			if (fusion->ld_drv_map[i]) {
+				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+					vfree(fusion->ld_drv_map[i]);
+				else
+					free_pages((ulong)fusion->ld_drv_map[i],
+						   fusion->drv_map_pages);
+			}
+
 			if (fusion->pd_seq_sync[i])
 				dma_free_coherent(&instance->pdev->dev,
 					pd_seq_map_sz,
@@ -6866,6 +6883,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	void *sense = NULL;
 	dma_addr_t sense_handle;
 	unsigned long *sense_ptr;
+	u32 opcode;
 
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
@@ -6893,15 +6911,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
 					       MFI_FRAME_SGL64 |
 					       MFI_FRAME_SENSE64));
+	opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
 
-	if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
+	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
 			megasas_return_cmd(instance, cmd);
 			return -1;
 		}
 	}
 
-	if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
 		error = megasas_set_crash_dump_params_ioctl(cmd);
 		megasas_return_cmd(instance, cmd);
 		return error;
@@ -6975,8 +6994,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 		cmd->sync_cmd = 0;
 		dev_err(&instance->pdev->dev,
 			"return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
-			__func__, __LINE__, cmd->frame->dcmd.opcode,
-			cmd->cmd_status_drv);
+			__func__, __LINE__, opcode, cmd->cmd_status_drv);
 		return -EBUSY;
 	}
 
@@ -7323,49 +7341,39 @@ static struct pci_driver megasas_pci_driver = {
 /*
  * Sysfs driver attributes
  */
-static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+static ssize_t version_show(struct device_driver *dd, char *buf)
 {
 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
 			MEGASAS_VERSION);
 }
+static DRIVER_ATTR_RO(version);
 
-static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
-
-static ssize_t
-megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+static ssize_t release_date_show(struct device_driver *dd, char *buf)
 {
 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
 		MEGASAS_RELDATE);
 }
+static DRIVER_ATTR_RO(release_date);
 
-static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
-
-static ssize_t
-megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", support_poll_for_event);
 }
+static DRIVER_ATTR_RO(support_poll_for_event);
 
-static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
-			megasas_sysfs_show_support_poll_for_event, NULL);
-
- static ssize_t
-megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", support_device_change);
 }
+static DRIVER_ATTR_RO(support_device_change);
 
-static DRIVER_ATTR(support_device_change, S_IRUGO,
-			megasas_sysfs_show_support_device_change, NULL);
-
-static ssize_t
-megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
 {
 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
 }
 
-static ssize_t
-megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
+			     size_t count)
 {
 	int retval = count;
 
@@ -7375,9 +7383,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 	}
 	return retval;
 }
-
-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
-		megasas_sysfs_set_dbg_lvl);
+static DRIVER_ATTR_RW(dbg_lvl);
 
 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
 {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 62affa76133d330f59ad0d70e44fed119a1d46d7..ecc699a65bacb455061999f031465efe4be2b830 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -67,16 +67,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
 #define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
 #define MR_LD_STATE_OPTIMAL 3
 
-#ifdef FALSE
-#undef FALSE
-#endif
-#define FALSE 0
-
-#ifdef TRUE
-#undef TRUE
-#endif
-#define TRUE 1
-
 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
 #define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
 #define SPAN_INVALID  0xff
@@ -709,7 +699,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	u32     pd, arRef, r1_alt_pd;
 	u8      physArm, span;
 	u64     row;
-	u8	retval = TRUE;
+	u8	retval = true;
 	u64	*pdBlock = &io_info->pdBlock;
 	__le16	*pDevHandle = &io_info->devHandle;
 	u8	*pPdInterface = &io_info->pd_interface;
@@ -727,7 +717,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	if (raid->level == 6) {
 		logArm = get_arm_from_strip(instance, ld, stripRow, map);
 		if (logArm == -1U)
-			return FALSE;
+			return false;
 		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
 		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
 		arm = armQ + 1 + logArm;
@@ -738,7 +728,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 		/* Calculate the arm */
 		physArm = get_arm(instance, ld, span, stripRow, map);
 	if (physArm == 0xFF)
-		return FALSE;
+		return false;
 
 	arRef       = MR_LdSpanArrayGet(ld, span, map);
 	pd          = MR_ArPdGet(arRef, physArm, map);
@@ -812,7 +802,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	u32         pd, arRef, r1_alt_pd;
 	u8          physArm, span;
 	u64         row;
-	u8	    retval = TRUE;
+	u8	    retval = true;
 	u64	    *pdBlock = &io_info->pdBlock;
 	__le16	    *pDevHandle = &io_info->devHandle;
 	u8	    *pPdInterface = &io_info->pd_interface;
@@ -829,7 +819,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		u32 rowMod, armQ, arm;
 
 		if (raid->rowSize == 0)
-			return FALSE;
+			return false;
 		/* get logical row mod */
 		rowMod = mega_mod64(row, raid->rowSize);
 		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
@@ -839,7 +829,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		physArm = (u8)arm;
 	} else  {
 		if (raid->modFactor == 0)
-			return FALSE;
+			return false;
 		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
 							  raid->modFactor),
 					  map);
@@ -851,7 +841,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	} else {
 		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
 		if (span == SPAN_INVALID)
-			return FALSE;
+			return false;
 	}
 
 	/* Get the array on which this span is present */
@@ -954,7 +944,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	 */
 	if (raid->rowDataSize == 0) {
 		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
-			return FALSE;
+			return false;
 		else if (instance->UnevenSpanSupport) {
 			io_info->IoforUnevenSpan = 1;
 		} else {
@@ -963,7 +953,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				"rowDataSize = 0x%0x,"
 				"but there is _NO_ UnevenSpanSupport\n",
 				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
-			return FALSE;
+			return false;
 		}
 	}
 
@@ -988,7 +978,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 			dev_info(&instance->pdev->dev, "return from %s %d."
 				"Send IO w/o region lock.\n",
 				__func__, __LINE__);
-			return FALSE;
+			return false;
 		}
 
 		if (raid->spanDepth == 1) {
@@ -1004,7 +994,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				(unsigned long long)start_row,
 				(unsigned long long)start_strip,
 				(unsigned long long)endStrip);
-			return FALSE;
+			return false;
 		}
 		io_info->start_span	= startlba_span;
 		io_info->start_row	= start_row;
@@ -1038,7 +1028,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 					       raid->capability.
 					       fpWriteAcrossStripe));
 	} else
-		io_info->fpOkForIo = FALSE;
+		io_info->fpOkForIo = false;
 
 	if (numRows == 1) {
 		/* single-strip IOs can always lock only the data needed */
@@ -1124,7 +1114,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 					pRAID_Context, map);
 		/* If IO on an invalid Pd, then FP is not possible.*/
 		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
-			io_info->fpOkForIo = FALSE;
+			io_info->fpOkForIo = false;
 		return retval;
 	} else if (isRead) {
 		uint stripIdx;
@@ -1138,10 +1128,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 				    start_strip + stripIdx, ref_in_start_stripe,
 				    io_info, pRAID_Context, map);
 			if (!retval)
-				return TRUE;
+				return true;
 		}
 	}
-	return TRUE;
+	return true;
 }
 
 /*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 985510628f564286f5df386085476e9020e2ad47..11bd2e698b84c6b9a637fea7d4aba8928d04e3fd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -313,20 +313,20 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 		cmd = fusion->cmd_list[i];
 		if (cmd) {
 			if (cmd->sg_frame)
-				pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+				dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
 				      cmd->sg_frame_phys_addr);
 			if (cmd->sense)
-				pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+				dma_pool_free(fusion->sense_dma_pool, cmd->sense,
 				      cmd->sense_phys_addr);
 		}
 	}
 
 	if (fusion->sg_dma_pool) {
-		pci_pool_destroy(fusion->sg_dma_pool);
+		dma_pool_destroy(fusion->sg_dma_pool);
 		fusion->sg_dma_pool = NULL;
 	}
 	if (fusion->sense_dma_pool) {
-		pci_pool_destroy(fusion->sense_dma_pool);
+		dma_pool_destroy(fusion->sense_dma_pool);
 		fusion->sense_dma_pool = NULL;
 	}
 
@@ -343,11 +343,11 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 			fusion->request_alloc_sz, fusion->req_frames_desc,
 			fusion->req_frames_desc_phys);
 	if (fusion->io_request_frames)
-		pci_pool_free(fusion->io_request_frames_pool,
+		dma_pool_free(fusion->io_request_frames_pool,
 			fusion->io_request_frames,
 			fusion->io_request_frames_phys);
 	if (fusion->io_request_frames_pool) {
-		pci_pool_destroy(fusion->io_request_frames_pool);
+		dma_pool_destroy(fusion->io_request_frames_pool);
 		fusion->io_request_frames_pool = NULL;
 	}
 
@@ -376,12 +376,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 
 
 	fusion->sg_dma_pool =
-			pci_pool_create("mr_sg", instance->pdev,
+			dma_pool_create("mr_sg", &instance->pdev->dev,
 				instance->max_chain_frame_sz,
 				MR_DEFAULT_NVME_PAGE_SIZE, 0);
 	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
 	fusion->sense_dma_pool =
-			pci_pool_create("mr_sense", instance->pdev,
+			dma_pool_create("mr_sense", &instance->pdev->dev,
 				SCSI_SENSE_BUFFERSIZE, 64, 0);
 
 	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
@@ -395,10 +395,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 	 */
 	for (i = 0; i < max_cmd; i++) {
 		cmd = fusion->cmd_list[i];
-		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
 					GFP_KERNEL, &cmd->sg_frame_phys_addr);
 
-		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
 					GFP_KERNEL, &cmd->sense_phys_addr);
 		if (!cmd->sg_frame || !cmd->sense) {
 			dev_err(&instance->pdev->dev,
@@ -410,7 +410,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 	/* create sense buffer for the raid 1/10 fp */
 	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
 		cmd = fusion->cmd_list[i];
-		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
 			GFP_KERNEL, &cmd->sense_phys_addr);
 		if (!cmd->sense) {
 			dev_err(&instance->pdev->dev,
@@ -479,7 +479,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->io_request_frames_pool =
-			pci_pool_create("mr_ioreq", instance->pdev,
+			dma_pool_create("mr_ioreq", &instance->pdev->dev,
 				fusion->io_frames_alloc_sz, 16, 0);
 
 	if (!fusion->io_request_frames_pool) {
@@ -489,7 +489,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->io_request_frames =
-			pci_pool_alloc(fusion->io_request_frames_pool,
+			dma_pool_alloc(fusion->io_request_frames_pool,
 				GFP_KERNEL, &fusion->io_request_frames_phys);
 	if (!fusion->io_request_frames) {
 		dev_err(&instance->pdev->dev,
@@ -509,7 +509,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
 
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 	fusion->reply_frames_desc_pool =
-			pci_pool_create("mr_reply", instance->pdev,
+			dma_pool_create("mr_reply", &instance->pdev->dev,
 				fusion->reply_alloc_sz * count, 16, 0);
 
 	if (!fusion->reply_frames_desc_pool) {
@@ -519,7 +519,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
 	}
 
 	fusion->reply_frames_desc[0] =
-		pci_pool_alloc(fusion->reply_frames_desc_pool,
+		dma_pool_alloc(fusion->reply_frames_desc_pool,
 			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
 	if (!fusion->reply_frames_desc[0]) {
 		dev_err(&instance->pdev->dev,
@@ -562,8 +562,10 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 	memset(fusion->rdpq_virt, 0,
 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
-	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
-							 instance->pdev, fusion->reply_alloc_sz, 16, 0);
+	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
+							 &instance->pdev->dev,
+							 fusion->reply_alloc_sz,
+							 16, 0);
 
 	if (!fusion->reply_frames_desc_pool) {
 		dev_err(&instance->pdev->dev,
@@ -573,7 +575,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 
 	for (i = 0; i < count; i++) {
 		fusion->reply_frames_desc[i] =
-				pci_pool_alloc(fusion->reply_frames_desc_pool,
+				dma_pool_alloc(fusion->reply_frames_desc_pool,
 					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
 		if (!fusion->reply_frames_desc[i]) {
 			dev_err(&instance->pdev->dev,
@@ -601,13 +603,13 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
 
 	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
 		if (fusion->reply_frames_desc[i])
-			pci_pool_free(fusion->reply_frames_desc_pool,
+			dma_pool_free(fusion->reply_frames_desc_pool,
 				fusion->reply_frames_desc[i],
 				fusion->reply_frames_desc_phys[i]);
 	}
 
 	if (fusion->reply_frames_desc_pool)
-		pci_pool_destroy(fusion->reply_frames_desc_pool);
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
 
 	if (fusion->rdpq_virt)
 		pci_free_consistent(instance->pdev,
@@ -623,12 +625,12 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
 	fusion = instance->ctrl_context;
 
 	if (fusion->reply_frames_desc[0])
-		pci_pool_free(fusion->reply_frames_desc_pool,
+		dma_pool_free(fusion->reply_frames_desc_pool,
 			fusion->reply_frames_desc[0],
 			fusion->reply_frames_desc_phys[0]);
 
 	if (fusion->reply_frames_desc_pool)
-		pci_pool_destroy(fusion->reply_frames_desc_pool);
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
 
 }
 
@@ -914,7 +916,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 		ret = 1;
 		goto fail_fw_init;
 	}
-	dev_info(&instance->pdev->dev, "Init cmd success\n");
 
 	ret = 0;
 
@@ -925,6 +926,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
 				  IOCInitMessage, ioc_init_handle);
 fail_get_cmd:
+	dev_err(&instance->pdev->dev,
+		"Init cmd return status %s for SCSI host %d\n",
+		ret ? "FAILED" : "SUCCESS", instance->host->host_no);
+
 	return ret;
 }
 
@@ -1260,6 +1265,80 @@ megasas_display_intel_branding(struct megasas_instance *instance)
 	}
 }
 
+/**
+ * megasas_allocate_raid_maps -	Allocate memory for RAID maps
+ * @instance:				Adapter soft state
+ *
+ * Return:				0 on success,
+ *					-ENOMEM on failure
+ */
+static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+	int i = 0;
+
+	fusion = instance->ctrl_context;
+
+	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = NULL;
+
+		fusion->ld_drv_map[i] = (void *)
+			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
+					 fusion->drv_map_pages);
+
+		if (!fusion->ld_drv_map[i]) {
+			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
+
+			if (!fusion->ld_drv_map[i]) {
+				dev_err(&instance->pdev->dev,
+					"Could not allocate memory for local map"
+					" size requested: %d\n",
+					fusion->drv_map_sz);
+				goto ld_drv_map_alloc_fail;
+			}
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+						       fusion->max_map_sz,
+						       &fusion->ld_map_phys[i],
+						       GFP_KERNEL);
+		if (!fusion->ld_map[i]) {
+			dev_err(&instance->pdev->dev,
+				"Could not allocate memory for map info %s:%d\n",
+				__func__, __LINE__);
+			goto ld_map_alloc_fail;
+		}
+	}
+
+	return 0;
+
+ld_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_map[i])
+			dma_free_coherent(&instance->pdev->dev,
+					  fusion->max_map_sz,
+					  fusion->ld_map[i],
+					  fusion->ld_map_phys[i]);
+	}
+
+ld_drv_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_drv_map[i]) {
+			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+				vfree(fusion->ld_drv_map[i]);
+			else
+				free_pages((ulong)fusion->ld_drv_map[i],
+					   fusion->drv_map_pages);
+		}
+	}
+
+	return -ENOMEM;
+}
+
 /**
  * megasas_init_adapter_fusion -	Initializes the FW
  * @instance:		Adapter soft state
@@ -1379,45 +1458,14 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
 	fusion->fast_path_io = 0;
 
-	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
-	for (i = 0; i < 2; i++) {
-		fusion->ld_map[i] = NULL;
-		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
-			fusion->drv_map_pages);
-		if (!fusion->ld_drv_map[i]) {
-			dev_err(&instance->pdev->dev, "Could not allocate "
-				"memory for local map info for %d pages\n",
-				fusion->drv_map_pages);
-			if (i == 1)
-				free_pages((ulong)fusion->ld_drv_map[0],
-					fusion->drv_map_pages);
-			goto fail_ioc_init;
-		}
-		memset(fusion->ld_drv_map[i], 0,
-			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
-	}
-
-	for (i = 0; i < 2; i++) {
-		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
-						       fusion->max_map_sz,
-						       &fusion->ld_map_phys[i],
-						       GFP_KERNEL);
-		if (!fusion->ld_map[i]) {
-			dev_err(&instance->pdev->dev, "Could not allocate memory "
-			       "for map info\n");
-			goto fail_map_info;
-		}
-	}
+	if (megasas_allocate_raid_maps(instance))
+		goto fail_ioc_init;
 
 	if (!megasas_get_map_info(instance))
 		megasas_sync_map_info(instance);
 
 	return 0;
 
-fail_map_info:
-	if (i == 1)
-		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
-				  fusion->ld_map[0], fusion->ld_map_phys[0]);
 fail_ioc_init:
 	megasas_free_cmds_fusion(instance);
 fail_alloc_cmds:
@@ -3287,7 +3335,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 
-	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
+	mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
 }
 
 /**
@@ -3369,17 +3417,13 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
 {
 	unsigned int i;
 
-	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
 	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
-		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
-				instance->crash_buf_pages);
+		instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
 		if (!instance->crash_buf[i]) {
 			dev_info(&instance->pdev->dev, "Firmware crash dump "
 				"memory allocation failed at index %d\n", i);
 			break;
 		}
-		memset(instance->crash_buf[i], 0,
-			((1 << PAGE_SHIFT) << instance->crash_buf_pages));
 	}
 	instance->drv_buf_alloc = i;
 }
@@ -3391,12 +3435,10 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
 void
 megasas_free_host_crash_buffer(struct megasas_instance *instance)
 {
-	unsigned int i
-;
+	unsigned int i;
 	for (i = 0; i < instance->drv_buf_alloc; i++) {
 		if (instance->crash_buf[i])
-			free_pages((ulong)instance->crash_buf[i],
-					instance->crash_buf_pages);
+			vfree(instance->crash_buf[i]);
 	}
 	instance->drv_buf_index = 0;
 	instance->drv_buf_alloc = 0;
@@ -3556,6 +3598,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 			}
 		}
 
+		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
 		outstanding = atomic_read(&instance->fw_outstanding);
 		if (!outstanding)
 			goto out;
@@ -3564,8 +3607,6 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
 			       "commands to complete for scsi%d\n", i,
 			       outstanding, instance->host->host_no);
-			megasas_complete_cmd_dpc_fusion(
-				(unsigned long)instance);
 		}
 		msleep(1000);
 	}
@@ -3623,6 +3664,15 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 
 		if (!smid)
 			continue;
+
+		/* Do not refire shutdown command */
+		if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
+			MR_DCMD_CTRL_SHUTDOWN) {
+			cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+			continue;
+		}
+
 		req_desc = megasas_get_request_descriptor
 					(instance, smid - 1);
 		refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
@@ -3750,7 +3800,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	struct megasas_cmd_fusion *cmd_fusion;
 	struct megasas_cmd *cmd_mfi;
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-	struct fusion_context *fusion;
+	struct fusion_context *fusion = NULL;
 	struct megasas_cmd_fusion *scsi_lookup;
 	int rc;
 	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
@@ -3777,8 +3827,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	cmd_fusion->request_desc = req_desc;
 	req_desc->Words = 0;
 
-	scsi_lookup = fusion->cmd_list[smid_task - 1];
-
 	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
 	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
 	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
@@ -3825,13 +3873,13 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 	rc = SUCCESS;
 	switch (type) {
 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		scsi_lookup = fusion->cmd_list[smid_task - 1];
+
 		if (scsi_lookup->scmd == NULL)
 			break;
 		else {
 			instance->instancet->disable_intr(instance);
 			megasas_sync_irqs((unsigned long)instance);
-			megasas_complete_cmd_dpc_fusion
-					((unsigned long)instance);
 			instance->instancet->enable_intr(instance);
 			if (scsi_lookup->scmd == NULL)
 				break;
@@ -3843,9 +3891,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
 			break;
 		instance->instancet->disable_intr(instance);
-		msleep(1000);
-		megasas_complete_cmd_dpc_fusion
-				((unsigned long)instance);
+		megasas_sync_irqs((unsigned long)instance);
 		rc = megasas_track_scsiio(instance, id, channel);
 		instance->instancet->enable_intr(instance);
 
@@ -4271,9 +4317,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
 
 			if (megasas_ioc_init_fusion(instance)) {
-				dev_warn(&instance->pdev->dev,
-				       "megasas_ioc_init_fusion() failed! for "
-				       "scsi%d\n", instance->host->host_no);
 				if (instance->requestorId && !reason)
 					goto fail_kill_adapter;
 				else
@@ -4319,6 +4362,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 			instance->instancet->enable_intr(instance);
 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
 
+			dev_info(&instance->pdev->dev, "Interrupts are enabled and"
+				" controller is OPERATIONAL for scsi:%d\n",
+				instance->host->host_no);
+
 			/* Restart SR-IOV heartbeat */
 			if (instance->requestorId) {
 				if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -4330,11 +4377,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 					instance->skip_heartbeat_timer_del = 1;
 			}
 
-			/* Adapter reset completed successfully */
-			dev_warn(&instance->pdev->dev, "Reset "
-			       "successful for scsi%d.\n",
-				instance->host->host_no);
-
 			if (instance->crash_dump_drv_support &&
 				instance->crash_dump_app_support)
 				megasas_set_crash_dump_params(instance,
@@ -4344,6 +4386,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 					MR_CRASH_BUF_TURN_OFF);
 
 			retval = SUCCESS;
+
+			/* Adapter reset completed successfully */
+			dev_warn(&instance->pdev->dev,
+				 "Reset successful for scsi%d.\n",
+				 instance->host->host_no);
+
 			goto out;
 		}
 fail_kill_adapter:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 18039bba26c43452751c6f492fe0a5e73eaabfc3..87999905bca347c191e957c6ab3c94510a0ca945 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -615,9 +615,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
 		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
 		    "start" : "stop");
 		if (event_data->DiscoveryStatus)
-			pr_info("discovery_status(0x%08x)",
+			pr_cont(" discovery_status(0x%08x)",
 			    le32_to_cpu(event_data->DiscoveryStatus));
-			pr_info("\n");
+		pr_cont("\n");
 		return;
 	}
 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
@@ -3198,9 +3198,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->sense) {
-		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-		if (ioc->sense_dma_pool)
-			pci_pool_destroy(ioc->sense_dma_pool);
+		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+		dma_pool_destroy(ioc->sense_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"sense_pool(0x%p): free\n",
 			ioc->name, ioc->sense));
@@ -3208,9 +3207,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->reply) {
-		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
-		if (ioc->reply_dma_pool)
-			pci_pool_destroy(ioc->reply_dma_pool);
+		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+		dma_pool_destroy(ioc->reply_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"reply_pool(0x%p): free\n",
 			ioc->name, ioc->reply));
@@ -3218,10 +3216,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->reply_free) {
-		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
-		if (ioc->reply_free_dma_pool)
-			pci_pool_destroy(ioc->reply_free_dma_pool);
+		dma_pool_destroy(ioc->reply_free_dma_pool);
 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
 			"reply_free_pool(0x%p): free\n",
 			ioc->name, ioc->reply_free));
@@ -3232,7 +3229,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		do {
 			rps = &ioc->reply_post[i];
 			if (rps->reply_post_free) {
-				pci_pool_free(
+				dma_pool_free(
 				    ioc->reply_post_free_dma_pool,
 				    rps->reply_post_free,
 				    rps->reply_post_free_dma);
@@ -3244,8 +3241,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		} while (ioc->rdpq_array_enable &&
 			   (++i < ioc->reply_queue_count));
 
-		if (ioc->reply_post_free_dma_pool)
-			pci_pool_destroy(ioc->reply_post_free_dma_pool);
+		dma_pool_destroy(ioc->reply_post_free_dma_pool);
 		kfree(ioc->reply_post);
 	}
 
@@ -3266,12 +3262,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->chain_lookup) {
 		for (i = 0; i < ioc->chain_depth; i++) {
 			if (ioc->chain_lookup[i].chain_buffer)
-				pci_pool_free(ioc->chain_dma_pool,
+				dma_pool_free(ioc->chain_dma_pool,
 				    ioc->chain_lookup[i].chain_buffer,
 				    ioc->chain_lookup[i].chain_buffer_dma);
 		}
-		if (ioc->chain_dma_pool)
-			pci_pool_destroy(ioc->chain_dma_pool);
+		dma_pool_destroy(ioc->chain_dma_pool);
 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
 		ioc->chain_lookup = NULL;
 	}
@@ -3446,23 +3441,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			ioc->name);
 		goto out;
 	}
-	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
-	    ioc->pdev, sz, 16, 0);
+	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
+	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
 		pr_err(MPT3SAS_FMT
-		 "reply_post_free pool: pci_pool_create failed\n",
+		 "reply_post_free pool: dma_pool_create failed\n",
 		 ioc->name);
 		goto out;
 	}
 	i = 0;
 	do {
 		ioc->reply_post[i].reply_post_free =
-		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
+		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
 		    GFP_KERNEL,
 		    &ioc->reply_post[i].reply_post_free_dma);
 		if (!ioc->reply_post[i].reply_post_free) {
 			pr_err(MPT3SAS_FMT
-			"reply_post_free pool: pci_pool_alloc failed\n",
+			"reply_post_free pool: dma_pool_alloc failed\n",
 			ioc->name);
 			goto out;
 		}
@@ -3577,15 +3572,15 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			ioc->name);
 		goto out;
 	}
-	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
 	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
 			ioc->name);
 		goto out;
 	}
 	for (i = 0; i < ioc->chain_depth; i++) {
-		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+		ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
 		    ioc->chain_dma_pool , GFP_KERNEL,
 		    &ioc->chain_lookup[i].chain_buffer_dma);
 		if (!ioc->chain_lookup[i].chain_buffer) {
@@ -3630,17 +3625,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
-	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
-	    0);
+	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
+					      4, 0);
 	if (!ioc->sense_dma_pool) {
-		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
 		    ioc->name);
 		goto out;
 	}
-	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
+	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
 		    ioc->name);
 		goto out;
 	}
@@ -3654,17 +3649,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* reply pool, 4 byte align */
 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
-	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
-	    0);
+	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
+					      4, 0);
 	if (!ioc->reply_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
 		    ioc->name);
 		goto out;
 	}
-	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
+	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
 		    ioc->name);
 		goto out;
 	}
@@ -3680,17 +3675,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 
 	/* reply free queue, 16 byte align */
 	sz = ioc->reply_free_queue_depth * 4;
-	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
-	    ioc->pdev, sz, 16, 0);
+	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
+	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_free_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
 			ioc->name);
 		goto out;
 	}
-	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
+	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
 	    &ioc->reply_free_dma);
 	if (!ioc->reply_free) {
-		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
 			ioc->name);
 		goto out;
 	}
@@ -3708,7 +3703,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	    ioc->config_page_sz, &ioc->config_page_dma);
 	if (!ioc->config_page) {
 		pr_err(MPT3SAS_FMT
-			"config page: pci_pool_alloc failed\n",
+			"config page: dma_pool_alloc failed\n",
 			ioc->name);
 		goto out;
 	}
@@ -5499,10 +5494,10 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
 	mutex_init(&ioc->ctl_cmds.mutex);
 
-	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
-	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
-	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
-	    !ioc->ctl_cmds.sense) {
+	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
+	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
+	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
+	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 099ab4ca7edfdb8eca1f976273b01562c0298ebc..a77bb7dc12b1ee5fc4c871148012b74d7d03962e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -970,7 +970,7 @@ struct MPT3SAS_ADAPTER {
 	u8		id;
 	int		cpu_count;
 	char		name[MPT_NAME_LENGTH];
-	char		driver_name[MPT_NAME_LENGTH];
+	char		driver_name[MPT_NAME_LENGTH - 8];
 	char		tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev	*pdev;
 	Mpi2SystemInterfaceRegs_t __iomem *chip;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e7a7a704a315d4a63dda72f4a85d745c1caaf27a..d3940c5d079dc728ada053fc6ca6023fdd5cc02b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1870,6 +1870,38 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 	return rc;
 }
 
+static int
+_transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+		dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+	/* Check if the request is split across multiple segments */
+	if (buf->sg_cnt > 1) {
+		*p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+				GFP_KERNEL);
+		if (!*p)
+			return -ENOMEM;
+		*dma_len = buf->payload_len;
+	} else {
+		if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+			return -ENOMEM;
+		*dma_addr = sg_dma_address(buf->sg_list);
+		*dma_len = sg_dma_len(buf->sg_list);
+		*p = NULL;
+	}
+
+	return 0;
+}
+
+static void
+_transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+		dma_addr_t dma_addr, void *p)
+{
+	if (p)
+		dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+	else
+		dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
 /**
  * _transport_smp_handler - transport portal for smp passthru
  * @shost: shost object
@@ -1880,9 +1912,9 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
  * Example:
  *           smp_rep_general /sys/class/bsg/expander-5:0
  */
-static int
-_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-	struct request *req)
+static void
+_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+		struct sas_rphy *rphy)
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -1891,33 +1923,25 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	u16 smid;
 	u32 ioc_state;
 	void *psge;
-	u8 issue_reset = 0;
-	dma_addr_t dma_addr_in = 0;
-	dma_addr_t dma_addr_out = 0;
-	dma_addr_t pci_dma_in = 0;
-	dma_addr_t pci_dma_out = 0;
-	void *pci_addr_in = NULL;
-	void *pci_addr_out = NULL;
+	dma_addr_t dma_addr_in;
+	dma_addr_t dma_addr_out;
+	void *addr_in = NULL;
+	void *addr_out = NULL;
+	size_t dma_len_in;
+	size_t dma_len_out;
 	u16 wait_state_count;
-	struct request *rsp = req->next_rq;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-
-	if (!rsp) {
-		pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
-			ioc->name, __func__);
-		return -EINVAL;
-	}
+	unsigned int reslen = 0;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
 		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
-		return -EFAULT;
+		rc = -EFAULT;
+		goto job_done;
 	}
 
 	rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
 	if (rc)
-		return rc;
+		goto job_done;
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
 		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
@@ -1927,58 +1951,20 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	}
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
-	/* Check if the request is split across multiple segments */
-	if (bio_multiple_segments(req->bio)) {
-		u32 offset = 0;
-
-		/* Allocate memory and copy the request */
-		pci_addr_out = pci_alloc_consistent(ioc->pdev,
-		    blk_rq_bytes(req), &pci_dma_out);
-		if (!pci_addr_out) {
-			pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		bio_for_each_segment(bvec, req->bio, iter) {
-			memcpy(pci_addr_out + offset,
-			    page_address(bvec.bv_page) + bvec.bv_offset,
-			    bvec.bv_len);
-			offset += bvec.bv_len;
-		}
-	} else {
-		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
-		    blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
-			pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto free_pci;
-		}
+	rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->request_payload,
+			&dma_addr_out, &dma_len_out, &addr_out);
+	if (rc)
+		goto out;
+	if (addr_out) {
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt, addr_out,
+				job->request_payload.payload_len);
 	}
 
-	/* Check if the response needs to be populated across
-	 * multiple segments */
-	if (bio_multiple_segments(rsp->bio)) {
-		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
-		    &pci_dma_in);
-		if (!pci_addr_in) {
-			pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto unmap;
-		}
-	} else {
-		dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
-		    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
-			pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
-			    ioc->name, __func__);
-			rc = -ENOMEM;
-			goto unmap;
-		}
-	}
+	rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->reply_payload,
+			&dma_addr_in, &dma_len_in, &addr_in);
+	if (rc)
+		goto unmap_out;
 
 	wait_state_count = 0;
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
@@ -1988,7 +1974,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 			    "%s: failed due to ioc not operational\n",
 			    ioc->name, __func__);
 			rc = -EFAULT;
-			goto unmap;
+			goto unmap_in;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
@@ -2005,7 +1991,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
 		rc = -EAGAIN;
-		goto unmap;
+		goto unmap_in;
 	}
 
 	rc = 0;
@@ -2018,15 +2004,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	mpi_request->SASAddress = (rphy) ?
 	    cpu_to_le64(rphy->identify.sas_address) :
 	    cpu_to_le64(ioc->sas_hba.sas_address);
-	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+	mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4);
 	psge = &mpi_request->SGL;
 
-	if (bio_multiple_segments(req->bio))
-		ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
-		    pci_dma_in, (blk_rq_bytes(rsp) + 4));
-	else
-		ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
-		    dma_addr_in, (blk_rq_bytes(rsp) + 4));
+	ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
+			dma_len_in - 4);
 
 	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s - sending smp request\n", ioc->name, __func__));
@@ -2040,83 +2022,52 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		    __func__, ioc->name);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
-		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
-			issue_reset = 1;
-		goto issue_host_reset;
+		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
+			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+			rc = -ETIMEDOUT;
+			goto unmap_in;
+		}
 	}
 
 	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s - complete\n", ioc->name, __func__));
 
-	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
-
-		mpi_reply = ioc->transport_cmds.reply;
-
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s - reply data transfer size(%d)\n",
-		    ioc->name, __func__,
-		    le16_to_cpu(mpi_reply->ResponseDataLength)));
-
-		memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
-		scsi_req(req)->sense_len = sizeof(*mpi_reply);
-		scsi_req(req)->resid_len = 0;
-		scsi_req(rsp)->resid_len -=
-		    le16_to_cpu(mpi_reply->ResponseDataLength);
-
-		/* check if the resp needs to be copied from the allocated
-		 * pci mem */
-		if (bio_multiple_segments(rsp->bio)) {
-			u32 offset = 0;
-			u32 bytes_to_copy =
-			    le16_to_cpu(mpi_reply->ResponseDataLength);
-			bio_for_each_segment(bvec, rsp->bio, iter) {
-				if (bytes_to_copy <= bvec.bv_len) {
-					memcpy(page_address(bvec.bv_page) +
-					    bvec.bv_offset, pci_addr_in +
-					    offset, bytes_to_copy);
-					break;
-				} else {
-					memcpy(page_address(bvec.bv_page) +
-					    bvec.bv_offset, pci_addr_in +
-					    offset, bvec.bv_len);
-					bytes_to_copy -= bvec.bv_len;
-				}
-				offset += bvec.bv_len;
-			}
-		}
-	} else {
+	if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
 		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
 		    "%s - no reply\n", ioc->name, __func__));
 		rc = -ENXIO;
+		goto unmap_in;
 	}
 
- issue_host_reset:
-	if (issue_reset) {
-		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-		rc = -ETIMEDOUT;
-	}
+	mpi_reply = ioc->transport_cmds.reply;
 
- unmap:
-	if (dma_addr_out)
-		pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
-		    PCI_DMA_BIDIRECTIONAL);
-	if (dma_addr_in)
-		pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
-		    PCI_DMA_BIDIRECTIONAL);
+	dtransportprintk(ioc,
+		pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
+			ioc->name, __func__,
+			le16_to_cpu(mpi_reply->ResponseDataLength)));
 
- free_pci:
-	if (pci_addr_out)
-		pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
-		    pci_dma_out);
+	memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
+	job->reply_len = sizeof(*mpi_reply);
+	reslen = le16_to_cpu(mpi_reply->ResponseDataLength);
 
-	if (pci_addr_in)
-		pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
-		    pci_dma_in);
+	if (addr_in) {
+		sg_copy_to_buffer(job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, addr_in,
+				job->reply_payload.payload_len);
+	}
 
+	rc = 0;
+ unmap_in:
+	_transport_unmap_smp_buffer(&ioc->pdev->dev, &job->reply_payload,
+			dma_addr_in, addr_in);
+ unmap_out:
+	_transport_unmap_smp_buffer(&ioc->pdev->dev, &job->request_payload,
+			dma_addr_out, addr_out);
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	mutex_unlock(&ioc->transport_cmds.mutex);
-	return rc;
+	bsg_job_done(job, rc, reslen);
 }
 
 struct sas_function_template mpt3sas_transport_functions = {
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index e7f6661a8862112a7a126e2ee4ec07c369c7dd5a..4f515700bdc349511cf5df90035f589b3a9e8039 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -121,21 +121,6 @@ int mvme147_detect(struct scsi_host_template *tpnt)
 	return 0;
 }
 
-static int mvme147_bus_reset(struct scsi_cmnd *cmd)
-{
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fallback to
-	   the same result, calling wd33c93_host_reset() */
-
-	spin_lock_irq(cmd->device->host->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(cmd->device->host->host_lock);
-
-	return SUCCESS;
-}
-
-
 static struct scsi_host_template driver_template = {
 	.proc_name		= "MVME147",
 	.name			= "MVME147 built-in SCSI",
@@ -143,7 +128,6 @@ static struct scsi_host_template driver_template = {
 	.release		= mvme147_release,
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= mvme147_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= CAN_QUEUE,
 	.this_id		= 7,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 4e047b5001a670ce8038c3b2df2a72ef592b9c74..718c88de328bff527c37ce1edd33f03208b8e711 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -61,7 +61,7 @@ static struct scsi_host_template mvs_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= mvst_host_attrs,
@@ -125,8 +125,7 @@ static void mvs_free(struct mvs_info *mvi)
 	else
 		slot_nr = MVS_CHIP_SLOT_SZ;
 
-	if (mvi->dma_pool)
-		pci_pool_destroy(mvi->dma_pool);
+	dma_pool_destroy(mvi->dma_pool);
 
 	if (mvi->tx)
 		dma_free_coherent(mvi->dev,
@@ -296,7 +295,8 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
 		goto err_out;
 
 	sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
-	mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
+	mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
+					MVS_SLOT_BUF_SZ, 16, 0);
 	if (!mvi->dma_pool) {
 			printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
 			goto err_out;
@@ -557,14 +557,14 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
 	SHOST_TO_SAS_HA(shost) =
 		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
 	if (!SHOST_TO_SAS_HA(shost)) {
-		kfree(shost);
+		scsi_host_put(shost);
 		rc = -ENOMEM;
 		goto err_out_regions;
 	}
 
 	rc = mvs_prep_sas_ha_init(shost, chip);
 	if (rc) {
-		kfree(shost);
+		scsi_host_put(shost);
 		rc = -ENOMEM;
 		goto err_out_regions;
 	}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index c7cc8035eacb371eb3ae6e038abb6df7c94e7169..ee81d10252e043fa1ed505d429ed7a7dbcebde3d 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -790,7 +790,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
 	slot->n_elem = n_elem;
 	slot->slot_tag = tag;
 
-	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+	slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
 	if (!slot->buf) {
 		rc = -ENOMEM;
 		goto err_out_tag;
@@ -840,7 +840,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
 	return rc;
 
 err_out_slot_buf:
-	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
 err_out_tag:
 	mvs_tag_free(mvi, tag);
 err_out:
@@ -918,7 +918,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 	}
 
 	if (slot->buf) {
-		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
 		slot->buf = NULL;
 	}
 	list_del_init(&slot->entry);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 53c84771f0e8939f53e561cad198c40ac337c2f2..107e191bf0239eb55475f84a4494111bba7a9cd8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -201,7 +201,6 @@ static int         nsp32_release     (struct Scsi_Host *);
 
 /* SCSI error handler */
 static int         nsp32_eh_abort     (struct scsi_cmnd *);
-static int         nsp32_eh_bus_reset (struct scsi_cmnd *);
 static int         nsp32_eh_host_reset(struct scsi_cmnd *);
 
 /* generate SCSI message */
@@ -276,8 +275,7 @@ static struct scsi_host_template nsp32_template = {
 	.max_sectors			= 128,
 	.this_id			= NSP32_HOST_SCSIID,
 	.use_clustering			= DISABLE_CLUSTERING,
-	.eh_abort_handler       	= nsp32_eh_abort,
-	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
+	.eh_abort_handler		= nsp32_eh_abort,
 	.eh_host_reset_handler		= nsp32_eh_host_reset,
 /*	.highmem_io			= 1, */
 };
@@ -2845,24 +2843,6 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
 	return SUCCESS;
 }
 
-static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
-{
-	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
-	unsigned int   base = SCpnt->device->host->io_port;
-
-	spin_lock_irq(SCpnt->device->host->host_lock);
-
-	nsp32_msg(KERN_INFO, "Bus Reset");	
-	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
-
-	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
-	nsp32_do_bus_reset(data);
-	nsp32_write2(base, IRQ_CONTROL, 0);
-
-	spin_unlock_irq(SCpnt->device->host->host_lock);
-	return SUCCESS;	/* SCSI bus reset is succeeded at any time. */
-}
-
 static void nsp32_do_bus_reset(nsp32_hw_data *data)
 {
 	unsigned int   base = data->BaseAddress;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 929ee7e88120b2372468b7f58905d44fe7a0b7a2..20ec1c01dbd5d1e0595aa41e397b898131f9d255 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -619,7 +619,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 	os_aux_t           * aux  = STp->buffer->aux;
 	os_partition_t     * par  = &(aux->partition);
 	struct st_partstat * STps = &(STp->ps[STp->partition]);
-	int		     blk_cnt, blk_sz, i;
+	unsigned int	     blk_cnt, blk_sz, i;
 
 	if (STp->raw) {
 		if (STp->buffer->syscall_result) {
@@ -5434,7 +5434,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i
 
 	for (i=0, offset=st_bp->buffer_bytes;
 	     i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
-	offset -= st_bp->sg[i].length;
+		offset -= st_bp->sg[i].length;
 	if (i == st_bp->sg_segs) {  /* Should never happen */
 		printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n");
 		return (-EIO);
@@ -5667,12 +5667,12 @@ static	struct	osst_support_data support_list[] = {
  * sysfs support for osst driver parameter information
  */
 
-static ssize_t osst_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", osst_version);
 }
 
-static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
 static int osst_create_sysfs_files(struct device_driver *sysfs)
 {
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 714b248f5d5e675bae69c9f4361db060dcce02a6..953a792150ae012adbd5b62d6f50690e35e863cd 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -173,7 +173,7 @@ static void fdomain_release(struct pcmcia_device *link)
 
 static int fdomain_resume(struct pcmcia_device *link)
 {
-	fdomain_16x0_bus_reset(NULL);
+	fdomain_16x0_host_reset(NULL);
 
 	return 0;
 }
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index c670dc704c74eb09ca90be4f2cc49e3ede651926..0556054764dc15fe6deeb0d2096aeb6bf0fd613d 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -67,7 +67,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.info			= qlogicfas408_info,
 	.queuecommand		= qlogicfas408_queuecommand,
 	.eh_abort_handler	= qlogicfas408_abort,
-	.eh_bus_reset_handler	= qlogicfas408_bus_reset,
+	.eh_host_reset_handler	= qlogicfas408_host_reset,
 	.bios_param		= qlogicfas408_biosparam,
 	.can_queue		= 1,
 	.this_id		= -1,
@@ -264,7 +264,7 @@ static int qlogic_resume(struct pcmcia_device *link)
 		outb(0x04, link->resource[0]->start + 0xd);
 	}
 	/* Ugggglllyyyy!!! */
-	qlogicfas408_bus_reset(NULL);
+	qlogicfas408_host_reset(NULL);
 
 	return 0;
 }
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 034b2f7d113586d58292dce0e7129368b2fc14ae..0e013f76b582e1f295d8fad7c87e2b1bda18aa64 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -86,7 +86,7 @@ static struct scsi_host_template pm8001_sht = {
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= pm8001_host_attrs,
@@ -160,8 +160,6 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 			}
 	}
 	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
-	if (pm8001_ha->shost)
-		scsi_host_put(pm8001_ha->shost);
 	flush_workqueue(pm8001_wq);
 	kfree(pm8001_ha->tags);
 	kfree(pm8001_ha);
@@ -1073,7 +1071,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 err_out_free:
 	kfree(SHOST_TO_SAS_HA(shost));
 err_out_free_host:
-	kfree(shost);
+	scsi_host_put(shost);
 err_out_regions:
 	pci_release_regions(pdev);
 err_out_disable:
@@ -1112,6 +1110,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
 		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
 			tasklet_kill(&pm8001_ha->tasklet[j]);
 #endif
+	scsi_host_put(pm8001_ha->shost);
 	pm8001_free(pm8001_ha);
 	kfree(sha->sas_phy);
 	kfree(sha->sas_port);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 1cc814f1505a3c149133a0c8c5c2f5f556c056d4..b4d6cd8cd1ad5bc1ad0590ecefb31e6809a5c3f3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1595,12 +1595,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	if (pinstance->ccn.hcam->notification_type ==
 	    NOTIFICATION_TYPE_ENTRY_CHANGED &&
 	    cfg_entry->resource_type == RES_TYPE_VSET) {
-
-		if (fw_version <= PMCRAID_FW_VERSION_1)
-			hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
-		else
-			hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
-
+		hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
 	} else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
 		goto out_notify_apps;
 	}
@@ -4655,13 +4650,13 @@ pmcraid_release_control_blocks(
 		return;
 
 	for (i = 0; i < max_index; i++) {
-		pci_pool_free(pinstance->control_pool,
+		dma_pool_free(pinstance->control_pool,
 			      pinstance->cmd_list[i]->ioa_cb,
 			      pinstance->cmd_list[i]->ioa_cb_bus_addr);
 		pinstance->cmd_list[i]->ioa_cb = NULL;
 		pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
 	}
-	pci_pool_destroy(pinstance->control_pool);
+	dma_pool_destroy(pinstance->control_pool);
 	pinstance->control_pool = NULL;
 }
 
@@ -4718,8 +4713,8 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
 		pinstance->host->unique_id);
 
 	pinstance->control_pool =
-		pci_pool_create(pinstance->ctl_pool_name,
-				pinstance->pdev,
+		dma_pool_create(pinstance->ctl_pool_name,
+				&pinstance->pdev->dev,
 				sizeof(struct pmcraid_control_block),
 				PMCRAID_IOARCB_ALIGNMENT, 0);
 
@@ -4728,7 +4723,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
 
 	for (i = 0; i < PMCRAID_MAX_CMD; i++) {
 		pinstance->cmd_list[i]->ioa_cb =
-			pci_pool_alloc(
+			dma_pool_alloc(
 				pinstance->control_pool,
 				GFP_KERNEL,
 				&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 01eb2bc16dc1daf9fe4392768bbf421e0f760115..8bfac72a242b77a22c2a2e1947ec33558345415e 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -755,7 +755,7 @@ struct pmcraid_instance {
 
 	/* structures related to command blocks */
 	struct kmem_cache *cmd_cachep;		/* cache for cmd blocks */
-	struct pci_pool *control_pool;		/* pool for control blocks */
+	struct dma_pool *control_pool;		/* pool for control blocks */
 	char   cmd_pool_name[64];		/* name of cmd cache */
 	char   ctl_pool_name[64];		/* name of control cache */
 
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f6ad579280d49dcc624ceffd1280387b3c6a5690..7be5823ab036b75a484fbf6641f24b17cc618f24 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -970,7 +970,6 @@ static struct scsi_host_template ppa_template = {
 	.name			= "Iomega VPI0 (ppa) interface",
 	.queuecommand		= ppa_queuecommand,
 	.eh_abort_handler	= ppa_abort,
-	.eh_bus_reset_handler	= ppa_reset,
 	.eh_host_reset_handler	= ppa_reset,
 	.bios_param		= ppa_biosparam,
 	.this_id		= -1,
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 351f06dfc5a0dac7bf4f7606166611848876d477..9bf7b227e69ad021e21807d8ce3370376bb7d93e 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -300,7 +300,6 @@ struct qedf_ctx {
 #define QEDF_FALLBACK_VLAN	1002
 #define QEDF_DEFAULT_PRIO	3
 	int vlan_id;
-	uint vlan_hw_insert:1;
 	struct qed_dev *cdev;
 	struct qed_dev_fcoe_info dev_info;
 	struct qed_int_info int_info;
@@ -443,7 +442,6 @@ extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
 extern int qedf_queuecommand(struct Scsi_Host *host,
 	struct scsi_cmnd *sc_cmd);
 extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
-extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
 extern u8 *qedf_get_src_mac(struct fc_lport *lport);
 extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
 extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index aefd24ca960423f3fed540e58da894b7c94f3d54..773558fc0697513a523eeae10b6598eb37ea22a9 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -108,7 +108,6 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
 	struct ethhdr *eth_hdr;
-	struct vlan_ethhdr *vlan_hdr;
 	struct fip_header *fiph;
 	u16 op, vlan_tci = 0;
 	u8 sub;
@@ -124,16 +123,14 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	op = ntohs(fiph->fip_op);
 	sub = fiph->fip_subcode;
 
-	if (!qedf->vlan_hw_insert) {
-		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
-		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
-		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
-		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
-		vlan_hdr->h_vlan_TCI = vlan_tci =  htons(qedf->vlan_id);
-	}
+	/*
+	 * Add VLAN tag to non-offload FIP frame based on current stored VLAN
+	 * for FIP/FCoE traffic.
+	 */
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
 
-	/* Update eth_hdr since we added a VLAN tag */
-	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+	/* Get VLAN ID from skb for printing purposes */
+	__vlan_hwaccel_get_tag(skb, &vlan_tci);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
 	    "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
@@ -174,7 +171,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 	/* Handle FIP VLAN resp in the driver */
 	if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
 		qedf_fcoe_process_vlan_resp(qedf, skb);
-		qedf->vlan_hw_insert = 0;
 		kfree_skb(skb);
 	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
@@ -242,26 +238,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 	}
 }
 
-void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
-{
-	struct qedf_ctx *qedf = lport_priv(lport);
-
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-	    "Setting data_src_addr=%pM.\n", addr);
-	ether_addr_copy(qedf->data_src_addr, addr);
-}
-
 u8 *qedf_get_src_mac(struct fc_lport *lport)
 {
-	u8 mac[ETH_ALEN];
-	u8 port_id[3];
 	struct qedf_ctx *qedf = lport_priv(lport);
 
-	/* We need to use the lport port_id to create the data_src_addr */
-	if (is_zero_ether_addr(qedf->data_src_addr)) {
-		hton24(port_id, lport->port_id);
-		fc_fcoe_set_mac(mac, port_id);
-		qedf->ctlr.update_mac(lport, mac);
-	}
 	return qedf->data_src_addr;
 }
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1d13c9ca517de7e2cec033bdf050498fafbfeccc..7c0064500cc528a9d8c29e3ecd4ff66f005717d6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -18,6 +18,7 @@
 #include <linux/kthread.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_host.h>
+#include <scsi/fc_frame.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
@@ -42,7 +43,7 @@ MODULE_PARM_DESC(dev_loss_tmo,  " dev_loss_tmo setting for attached "
 
 uint qedf_debug = QEDF_LOG_INFO;
 module_param_named(debug, qedf_debug, uint, S_IRUGO);
-MODULE_PARM_DESC(qedf_debug, " Debug mask. Pass '1' to enable default debugging"
+MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
 	" mask");
 
 static uint qedf_fipvlan_retries = 30;
@@ -163,7 +164,7 @@ static void qedf_handle_link_update(struct work_struct *work)
 		QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
 			   "response, falling back to default VLAN %d.\n",
 			   qedf_fallback_vlan);
-		qedf_set_vlan_id(qedf, QEDF_FALLBACK_VLAN);
+		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
 
 		/*
 		 * Zero out data_src_addr so we'll update it with the new
@@ -187,6 +188,50 @@ static void qedf_handle_link_update(struct work_struct *work)
 	}
 }
 
+#define	QEDF_FCOE_MAC_METHOD_GRANGED_MAC		1
+#define QEDF_FCOE_MAC_METHOD_FCF_MAP			2
+#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC		3
+static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
+{
+	u8 *granted_mac;
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	u8 fc_map[3];
+	int method = 0;
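+	/* Records which method selected the MAC; used only in the debug message below */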
+
+	/* Get granted MAC address from FIP FLOGI payload */
+	granted_mac = fr_cb(fp)->granted_mac;
+
+	/*
+	 * We set the source MAC for FCoE traffic based on the Granted MAC
+	 * address from the switch.
+	 *
+	 * If granted_mac is non-zero, we use that.
+	 * If granted_mac is zeroed out, we create the FCoE MAC based on
+	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
+	 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
+	 * d_id of the FLOGI frame.
+	 */
+	if (!is_zero_ether_addr(granted_mac)) {
+		ether_addr_copy(qedf->data_src_addr, granted_mac);
+		method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC;
+	} else if (qedf->ctlr.sel_fcf->fc_map != 0) {
+		hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
+		qedf->data_src_addr[0] = fc_map[0];
+		qedf->data_src_addr[1] = fc_map[1];
+		qedf->data_src_addr[2] = fc_map[2];
+		qedf->data_src_addr[3] = fh->fh_d_id[0];
+		qedf->data_src_addr[4] = fh->fh_d_id[1];
+		qedf->data_src_addr[5] = fh->fh_d_id[2];
+		method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
+	} else {
+		fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
+		method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
+	}
+
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+	    "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
+}
+
 static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	void *arg)
 {
@@ -212,6 +257,10 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	/* Log stats for FLOGI reject */
 	if (fc_frame_payload_op(fp) == ELS_LS_RJT)
 		qedf->flogi_failed++;
+	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+		/* Set the source MAC we will use for FCoE traffic */
+		qedf_set_data_src_addr(qedf, fp);
+	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
 	complete(&qedf->flogi_compl);
@@ -312,8 +361,9 @@ static void qedf_link_recovery(struct work_struct *work)
 	/* Since the link when down and up to verify which vlan we're on */
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	rc = qedf_initiate_fipvlan_req(qedf);
+	/* If getting the VLAN fails, set the VLAN to the fallback one */
 	if (!rc)
-		return;
+		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
 
 	/*
 	 * We need to wait for an FCF to be selected due to the
@@ -629,16 +679,6 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd)
-{
-	QEDF_ERR(NULL, "BUS RESET Issued...\n");
-	/*
-	 * Essentially a no-op but return SUCCESS to prevent
-	 * unnecessary escalation to the host reset handler.
-	 */
-	return SUCCESS;
-}
-
 void qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
 	while (1) {
@@ -716,7 +756,6 @@ static struct scsi_host_template qedf_host_template = {
 	.eh_abort_handler	= qedf_eh_abort,
 	.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
 	.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
-	.eh_bus_reset_handler = qedf_eh_bus_reset,
 	.eh_host_reset_handler  = qedf_eh_host_reset,
 	.slave_configure	= qedf_slave_configure,
 	.dma_boundary = QED_HW_DMA_BOUNDARY,
@@ -915,6 +954,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
 
+	/*
+	 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
+	 * for FIP/FCoE traffic.
+	 */
 	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
 
 	/* fill up mac and fcoe headers */
@@ -927,7 +970,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
 		ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
 
 	/* Set the source MAC address */
-	fc_fcoe_set_mac(eh->h_source, fh->fh_s_id);
+	ether_addr_copy(eh->h_source, qedf->data_src_addr);
 
 	hp = (struct fcoe_hdr *)(eh + 1);
 	memset(hp, 0, sizeof(*hp));
@@ -1025,7 +1068,6 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
 {
 	struct qed_fcoe_params_offload conn_info;
 	u32 port_id;
-	u8 lport_src_id[3];
 	int rval;
 	uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
 
@@ -1054,11 +1096,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
 	    (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
 
 	/* Need to use our FCoE MAC for the offload session */
-	port_id = fc_host_port_id(qedf->lport->host);
-	lport_src_id[2] = (port_id & 0x000000FF);
-	lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
-	lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
-	fc_fcoe_set_mac(conn_info.src_mac, lport_src_id);
+	ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
 
 	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
 
@@ -1347,7 +1385,6 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
 	fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
 
 	qedf->ctlr.send = qedf_fip_send;
-	qedf->ctlr.update_mac = qedf_update_src_mac;
 	qedf->ctlr.get_src_addr = qedf_get_src_mac;
 	ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
 }
@@ -2939,7 +2976,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 	sprintf(host_buf, "qedf_%u_link",
 	    qedf->lport->host->host_no);
-	qedf->link_update_wq = create_singlethread_workqueue(host_buf);
+	qedf->link_update_wq = create_workqueue(host_buf);
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 
@@ -3056,9 +3093,24 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
 		   qedf->mac);
 
-	/* Set the WWNN and WWPN based on the MAC address */
-	qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
-	qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
+	/*
+	 * Set the WWNN and WWPN in the following way:
+	 *
+	 * If the info we get from qed is non-zero then use that to set the
+	 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
+	 * WWPN and WWNN. Otherwise fall back to fcoe_wwn_from_mac() based
+	 */
+	if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
+		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+		    "Setting WWPN and WWNN from qed dev_info.\n");
+		qedf->wwnn = qedf->dev_info.wwnn;
+		qedf->wwpn = qedf->dev_info.wwpn;
+	} else {
+		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+		    "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
+		qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
+		qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
+	}
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,  "WWNN=%016llx "
 		   "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
 
@@ -3094,7 +3146,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	/* Start LL2 processing thread */
 	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
 	qedf->ll2_recv_wq =
-		create_singlethread_workqueue(host_buf);
+		create_workqueue(host_buf);
 	if (!qedf->ll2_recv_wq) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
 		goto err7;
@@ -3114,8 +3166,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	}
 	set_bit(QEDF_LL2_STARTED, &qedf->flags);
 
-	/* hw will be insterting vlan tag*/
-	qedf->vlan_hw_insert = 1;
+	/* Set initial FIP/FCoE VLAN to NULL */
 	qedf->vlan_id = 0;
 
 	/*
@@ -3137,7 +3188,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 	sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
 	qedf->timer_work_queue =
-		create_singlethread_workqueue(host_buf);
+		create_workqueue(host_buf);
 	if (!qedf->timer_work_queue) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
 			  "workqueue.\n");
@@ -3148,7 +3199,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	if (mode != QEDF_MODE_RECOVERY) {
 		sprintf(host_buf, "qedf_%u_dpc",
 		    qedf->lport->host->host_no);
-		qedf->dpc_wq = create_singlethread_workqueue(host_buf);
+		qedf->dpc_wq = create_workqueue(host_buf);
 	}
 
 	/*
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 6fa442061c32afc7005dec1c982132607b309d1a..397b3b8ee51a48eb5aca034c5ad486c6e0381bed 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION		"8.18.22.0"
+#define QEDF_VERSION		"8.20.5.0"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		18
-#define QEDF_DRIVER_REV_VER		22
+#define QEDF_DRIVER_MINOR_VER		20
+#define QEDF_DRIVER_REV_VER		5
 #define QEDF_DRIVER_ENG_VER		0
 
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 91d2f51c351b4d6940d04a1e4a261f2d816e7b04..b8b22ce60ecc1d2ad626dd79732948904a2a758f 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -54,8 +54,8 @@ struct qedi_endpoint;
 /* MAX Length for cached SGL */
 #define MAX_SGLEN_FOR_CACHESGL	((1U << 16) - 1)
 
-#define MAX_NUM_MSIX_PF         8
-#define MIN_NUM_CPUS_MSIX(x)	min((x)->msix_count, num_online_cpus())
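+/* Bound the queue count by the firmware-reported CQ count and the online CPUs */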
+#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
+					num_online_cpus())
 
 #define QEDI_LOCAL_PORT_MIN     60000
 #define QEDI_LOCAL_PORT_MAX     61024
@@ -301,7 +301,6 @@ struct qedi_ctx {
 	u16 bdq_prod_idx;
 	u16 rq_num_entries;
 
-	u32 msix_count;
 	u32 max_sqes;
 	u8 num_queues;
 	u32 max_active_conns;
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 37da9a8b43b1f1dfa25133cab878ce39d6efcde3..a02b34ea5cab15194e64b17539fca3ad654e7b04 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -534,7 +534,7 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 	SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
 	SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
 
-	conn_info->default_cq = (qedi_ep->fw_cid % 8);
+	conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues);
 
 	conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
 	conn_info->dup_ack_theshold = 3;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 2c37836848152f91b394bfeea4e120ce2d35f46a..cccc34adc0e02429417db29b0b124bf8de57051d 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -794,13 +794,14 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 	u32 log_page_size;
 	int rval = 0;
 
-	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
-		  MIN_NUM_CPUS_MSIX(qedi));
 
 	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
 
 	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
 
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "Number of CQ count is %d\n", qedi->num_queues);
+
 	memset(&qedi->pf_params.iscsi_pf_params, 0,
 	       sizeof(qedi->pf_params.iscsi_pf_params));
 
@@ -1575,7 +1576,7 @@ struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
 {
 	struct qedi_cmd *cmd = NULL;
 
-	if (tid > MAX_ISCSI_TASK_ENTRIES)
+	if (tid >= MAX_ISCSI_TASK_ENTRIES)
 		return NULL;
 
 	cmd = qedi->itt_map[tid].p_cmd;
@@ -2179,9 +2180,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
 		goto free_host;
 	}
 
-	qedi->msix_count = MAX_NUM_MSIX_PF;
 	atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
 
+	rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+	if (rc)
+		goto free_host;
+
 	if (mode != QEDI_MODE_RECOVERY) {
 		rc = qedi_set_iscsi_pf_param(qedi);
 		if (rc) {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 08a1feb3a1957ca6d3fe1a68bb32a5ed2a359f63..9ce28c4f9812b6a4890fd909ae3fec8f6b39bd0c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		return -EINVAL;
 	if (start > ha->optrom_size)
 		return -EINVAL;
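+	/* Clamp the requested size so start + size cannot run past the option ROM */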
+	if (size > ha->optrom_size - start)
+		size = ha->optrom_size - start;
 
 	mutex_lock(&ha->optrom_mutex);
 	switch (val) {
@@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -565,47 +565,17 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
 {
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
-	struct qla_hw_data *ha = vha->hw;
-	uint16_t iter, addr, offset;
 	int rval;
 
-	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
 		return 0;
 
-	if (ha->sfp_data)
-		goto do_read;
-
-	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
-	    &ha->sfp_data_dma);
-	if (!ha->sfp_data) {
-		ql_log(ql_log_warn, vha, 0x706c,
-		    "Unable to allocate memory for SFP read-data.\n");
+	if (qla2x00_reset_active(vha))
 		return 0;
-	}
-
-do_read:
-	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
-	addr = 0xa0;
-	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
-	    iter++, offset += SFP_BLOCK_SIZE) {
-		if (iter == 4) {
-			/* Skip to next device address. */
-			addr = 0xa2;
-			offset = 0;
-		}
-
-		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
-		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
-		if (rval != QLA_SUCCESS) {
-			ql_log(ql_log_warn, vha, 0x706d,
-			    "Unable to read SFP data (%x/%x/%x).\n", rval,
-			    addr, offset);
 
-			return -EIO;
-		}
-		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
-		buf += SFP_BLOCK_SIZE;
-	}
+	rval = qla2x00_read_sfp_dev(vha, buf, count);
+	if (rval)
+		return -EIO;
 
 	return count;
 }
@@ -615,7 +585,7 @@ static struct bin_attribute sysfs_sfp_attr = {
 		.name = "sfp",
 		.mode = S_IRUSR | S_IWUSR,
 	},
-	.size = SFP_DEV_SIZE * 2,
+	.size = SFP_DEV_SIZE,
 	.read = qla2x00_sysfs_read_sfp,
 };
 
@@ -1511,6 +1481,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
 	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
 }
 
+static ssize_t
+qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
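+	/* Firmware encoding of min_link_speed: 2 = 4G, 3 = 8G, 4 = 16G, 5 = 32G */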
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->min_link_speed == 5 ? "32Gps" :
+	    ha->min_link_speed == 4 ? "16Gps" :
+	    ha->min_link_speed == 3 ? "8Gps" :
+	    ha->min_link_speed == 2 ? "4Gps" :
+	    ha->min_link_speed != 0 ? "unknown" : "");
+}
+
+static ssize_t
+qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->max_speed_sup ? "32Gps" : "16Gps");
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1556,6 +1558,8 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
 		   qla2x00_allow_cna_fw_dump_show,
 		   qla2x00_allow_cna_fw_dump_store);
 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
+static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
+static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1590,6 +1594,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_fw_dump_size,
 	&dev_attr_allow_cna_fw_dump,
 	&dev_attr_pep_version,
+	&dev_attr_min_link_speed,
+	&dev_attr_max_speed_sup,
 	NULL,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 26751d34bcf2fbaea7cd404ce09413044436b365..3e9dc54b89a32f1704e4d68569d667ee673d34e4 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,7 +14,7 @@
  * | Module Init and Probe        |       0x0193       | 0x0146         |
  * |                              |                    | 0x015b-0x0160	|
  * |                              |                    | 0x016e		|
- * | Mailbox commands             |       0x1199       | 0x1193		|
+ * | Mailbox commands             |       0x1205       | 0x11a2-0x11ff	|
  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
  * |				  | 		       | 0x211a         |
  * |                              |                    | 0x211c-0x2128  |
@@ -41,7 +41,7 @@
  * |                              |                    | 0x70ad-0x70ae  |
  * |                              |                    | 0x70d0-0x70d6	|
  * |                              |                    | 0x70d7-0x70db  |
- * | Task Management              |       0x8042       | 0x8000,0x800b  |
+ * | Task Management              |       0x8042       | 0x8000         |
  * |                              |                    | 0x8019         |
  * |                              |                    | 0x8025,0x8026  |
  * |                              |                    | 0x8031,0x8032  |
@@ -2520,8 +2520,6 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 static inline int
 ql_mask_match(uint32_t level)
 {
-	if (ql2xextended_error_logging == 1)
-		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 	return (level & ql2xextended_error_logging) == level;
 }
 
@@ -2738,9 +2736,9 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
 		mbx_reg = MAILBOX_REG(ha, reg, 0);
 
 	ql_dbg(level, vha, id, "Mailbox registers:\n");
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++, mbx_reg++)
 		ql_dbg(level, vha, id,
-		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
+		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
 }
 
 
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0730b10b42803ef18243960705b3a3ae220882d4..486c075998f635e34b95d7ce1ea20ee632abfcd2 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -427,6 +427,7 @@ struct srb_iocb {
 			enum nvmefc_fcp_datadir dir;
 			uint32_t dl;
 			uint32_t timeout_sec;
+			struct	list_head   entry;
 		} nvme;
 	} u;
 
@@ -470,7 +471,7 @@ typedef struct srb {
 	uint8_t cmd_type;
 	uint8_t pad[3];
 	atomic_t ref_count;
-	wait_queue_head_t nvme_ls_waitQ;
+	wait_queue_head_t nvme_ls_waitq;
 	struct fc_port *fcport;
 	struct scsi_qla_host *vha;
 	uint32_t handle;
@@ -901,6 +902,7 @@ struct mbx_cmd_32 {
 #define MBA_SHUTDOWN_REQUESTED	0x8062	/* Shutdown Requested */
 #define MBA_TEMPERATURE_ALERT	0x8070	/* Temperature Alert */
 #define MBA_DPORT_DIAGNOSTICS	0x8080	/* D-port Diagnostics */
+#define MBA_TRANS_INSERT	0x8130	/* Transceiver Insertion */
 #define MBA_FW_INIT_FAILURE	0x8401	/* Firmware initialization failure */
 #define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
 					   Notification */
@@ -977,6 +979,7 @@ struct mbx_cmd_32 {
 #define MBC_ABORT_TARGET		0x17	/* Abort target (ID). */
 #define MBC_RESET			0x18	/* Reset. */
 #define MBC_GET_ADAPTER_LOOP_ID		0x20	/* Get loop id of ISP2200. */
+#define MBC_GET_SET_ZIO_THRESHOLD	0x21	/* Get/SET ZIO THRESHOLD. */
 #define MBC_GET_RETRY_COUNT		0x22	/* Get f/w retry cnt/delay. */
 #define MBC_DISABLE_VI			0x24	/* Disable VI operation. */
 #define MBC_ENABLE_VI			0x25	/* Enable VI operation. */
@@ -2301,8 +2304,7 @@ typedef struct fc_port {
 	unsigned int login_succ:1;
 
 	struct work_struct nvme_del_work;
-	atomic_t nvme_ref_count;
-	wait_queue_head_t nvme_waitQ;
+	struct completion nvme_del_done;
 	uint32_t nvme_prli_service_param;
 #define NVME_PRLI_SP_CONF       BIT_7
 #define NVME_PRLI_SP_INITIATOR  BIT_5
@@ -3338,6 +3340,7 @@ struct qla_qpair {
 	struct work_struct q_work;
 	struct list_head qp_list_elem; /* vha->qp_list */
 	struct list_head hints_list;
+	struct list_head nvme_done_list;
 	uint16_t cpuid;
 	struct qla_tgt_counters tgt_counters;
 };
@@ -3463,8 +3466,15 @@ struct qla_hw_data {
 		uint32_t	n2n_ae:1;
 		uint32_t	fw_started:1;
 		uint32_t	fw_init_done:1;
+
+		uint32_t	detected_lr_sfp:1;
+		uint32_t	using_lr_setting:1;
 	} flags;
 
+	uint16_t long_range_distance;	/* 32G & above */
+#define LR_DISTANCE_5K  1
+#define LR_DISTANCE_10K 0
+
 	/* This spinlock is used to protect "io transactions", you must
 	* acquire it before doing any IO to the card, eg with RD_REG*() and
 	* WRT_REG*() for the duration of your entire commandtransaction.
@@ -3712,7 +3722,7 @@ struct qla_hw_data {
 	struct sns_cmd_pkt	*sns_cmd;
 	dma_addr_t		sns_cmd_dma;
 
-#define SFP_DEV_SIZE    256
+#define SFP_DEV_SIZE    512
 #define SFP_BLOCK_SIZE  64
 	void		*sfp_data;
 	dma_addr_t	sfp_data_dma;
@@ -4017,8 +4027,20 @@ struct qla_hw_data {
 
 	struct qlt_hw_data tgt;
 	int	allow_cna_fw_dump;
+	uint32_t fw_ability_mask;
+	uint16_t min_link_speed;
+	uint16_t max_speed_sup;
+
+	atomic_t        nvme_active_aen_cnt;
+	uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
 };
 
+#define FW_ABILITY_MAX_SPEED_MASK	0xFUL
+#define FW_ABILITY_MAX_SPEED_16G	0x0
+#define FW_ABILITY_MAX_SPEED_32G	0x1
+#define FW_ABILITY_MAX_SPEED(ha)	\
+	(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK)
+
 /*
  * Qlogic scsi host structure
  */
@@ -4089,6 +4111,8 @@ typedef struct scsi_qla_host {
 #define FX00_CRITEMP_RECOVERY	25
 #define FX00_HOST_INFO_RESEND	26
 #define QPAIR_ONLINE_CHECK_NEEDED	27
+#define SET_ZIO_THRESHOLD_NEEDED	28
+#define DETECT_SFP_CHANGE	29
 
 	unsigned long	pci_flags;
 #define PFLG_DISCONNECTED	0	/* PCI device removed */
@@ -4129,8 +4153,7 @@ typedef struct scsi_qla_host {
 	uint8_t		fabric_node_name[WWN_SIZE];
 
 	struct		nvme_fc_local_port *nvme_local_port;
-	atomic_t	nvme_ref_count;
-	wait_queue_head_t nvme_waitQ;
+	struct completion nvme_del_done;
 	struct list_head nvme_rport_list;
 	atomic_t 	nvme_active_aen_cnt;
 	uint16_t	nvme_last_rptd_aen;
@@ -4199,6 +4222,7 @@ typedef struct scsi_qla_host {
 	int fcport_count;
 	wait_queue_head_t fcport_waitQ;
 	wait_queue_head_t vref_waitq;
+	uint8_t min_link_speed_feat;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4373,6 +4397,88 @@ enum nexus_wait_type {
 	WAIT_LUN,
 };
 
+/* Refer to SNIA SFF 8247 */
+struct sff_8247_a0 {
+	u8 txid;	/* transceiver id */
+	u8 ext_txid;
+	u8 connector;
+	/* compliance code */
+	u8 eth_infi_cc3;	/* ethernet, infiniband */
+	u8 sonet_cc4[2];
+	u8 eth_cc6;
+	/* link length */
+#define FC_LL_VL BIT_7	/* very long */
+#define FC_LL_S  BIT_6	/* Short */
+#define FC_LL_I  BIT_5	/* Intermediate */
+#define FC_LL_L  BIT_4	/* Long */
+#define FC_LL_M  BIT_3	/* Medium */
+#define FC_LL_SA BIT_2	/* ShortWave laser */
+#define FC_LL_LC BIT_1	/* LongWave laser */
+#define FC_LL_EL BIT_0	/* Electrical inter enclosure */
+	u8 fc_ll_cc7;
+	/* FC technology */
+#define FC_TEC_EL BIT_7	/* Electrical inter enclosure */
+#define FC_TEC_SN BIT_6	/* short wave w/o OFC */
+#define FC_TEC_SL BIT_5	/* short wave with OFC */
+#define FC_TEC_LL BIT_4	/* Longwave Laser */
+#define FC_TEC_ACT BIT_3	/* Active cable */
+#define FC_TEC_PAS BIT_2	/* Passive cable */
+	u8 fc_tec_cc8;
+	/* Transmission Media */
+#define FC_MED_TW BIT_7	/* Twin Ax */
+#define FC_MED_TP BIT_6	/* Twisted Pair */
+#define FC_MED_MI BIT_5	/* Min Coax */
+#define FC_MED_TV BIT_4	/* Video Coax */
+#define FC_MED_M6 BIT_3	/* Multimode, 62.5um */
+#define FC_MED_M5 BIT_2	/* Multimode, 50um */
+#define FC_MED_SM BIT_0	/* Single Mode */
+	u8 fc_med_cc9;
+	/* speed FC_SP_12: 12*100M = 1200 MB/s */
+#define FC_SP_12 BIT_7
+#define FC_SP_8  BIT_6
+#define FC_SP_16 BIT_5
+#define FC_SP_4  BIT_4
+#define FC_SP_32 BIT_3
+#define FC_SP_2  BIT_2
+#define FC_SP_1  BIT_0
+	u8 fc_sp_cc10;
+	u8 encode;
+	u8 bitrate;
+	u8 rate_id;
+	u8 length_km;		/* offset 14/eh */
+	u8 length_100m;
+	u8 length_50um_10m;
+	u8 length_62um_10m;
+	u8 length_om4_10m;
+	u8 length_om3_10m;
+#define SFF_VEN_NAME_LEN 16
+	u8 vendor_name[SFF_VEN_NAME_LEN];	/* offset 20/14h */
+	u8 tx_compat;
+	u8 vendor_oui[3];
+#define SFF_PART_NAME_LEN 16
+	u8 vendor_pn[SFF_PART_NAME_LEN];	/* part number */
+	u8 vendor_rev[4];
+	u8 wavelength[2];
+	u8 resv;
+	u8 cc_base;
+	u8 options[2];	/* offset 64 */
+	u8 br_max;
+	u8 br_min;
+	u8 vendor_sn[16];
+	u8 date_code[8];
+	u8 diag;
+	u8 enh_options;
+	u8 sff_revision;
+	u8 cc_ext;
+	u8 vendor_specific[32];
+	u8 resv2[128];
+};
+
+#define AUTO_DETECT_SFP_SUPPORT(_vha)\
+	(ql2xautodetectsfp && !_vha->vp_idx &&		\
+	(IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
+	IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+
 #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
 	(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
 
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index b9c9886e8b1da6fd6f4fe7bc8937a254352935ba..bec641aae7b35e69b18f0411d5906c385b247e09 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1699,6 +1699,15 @@ struct access_chip_rsp_84xx {
 #define FAC_OPT_CMD_UNLOCK_SEMAPHORE	0x04
 #define FAC_OPT_CMD_GET_SECTOR_SIZE	0x05
 
+/* enhanced features bit definitions */
+#define NEF_LR_DIST_ENABLE	BIT_0
+
+/* LR Distance bit positions */
+#define LR_DIST_NV_POS		2
+#define LR_DIST_FW_POS		12
+#define LR_DIST_FW_SHIFT	(LR_DIST_FW_POS - LR_DIST_NV_POS)
+#define LR_DIST_FW_FIELD(x)	((x) << LR_DIST_FW_SHIFT & 0xf000)
+
 struct nvram_81xx {
 	/* NVRAM header. */
 	uint8_t id[4];
@@ -1745,7 +1754,9 @@ struct nvram_81xx {
 	uint16_t reserved_6_3[14];
 
 	/* Offset 192. */
-	uint16_t reserved_7[32];
+	uint8_t min_link_speed;
+	uint8_t reserved_7_0;
+	uint16_t reserved_7[31];
 
 	/*
 	 * BIT 0  = Enable spinup delay
@@ -1839,16 +1850,13 @@ struct nvram_81xx {
 	uint8_t reserved_21[16];
 	uint16_t reserved_22[3];
 
-	/*
-	 * BIT 0 = Extended BB credits for LR
-	 * BIT 1 = Virtual Fabric Enable
-	 * BIT 2 = Enhanced Features Unused
-	 * BIT 3-7 = Enhanced Features Reserved
+	/* Offset 406 (0x196) Enhanced Features
+	 * BIT 0    = Extended BB credits for LR
+	 * BIT 1    = Virtual Fabric Enable
+	 * BIT 2-5  = Distance Support if BIT 0 is on
+	 * BIT 6-15 = Unused
 	 */
-	/* Enhanced Features */
-	uint8_t enhanced_features;
-
-	uint8_t reserved_23;
+	uint16_t enhanced_features;
 	uint16_t reserved_24[4];
 
 	/* Offset 416. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index cadb6e3baacce3d395350526f63e5b18ad9e6e13..f852ca60c49fd182192efaeb6853bf5c379f015c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -9,17 +9,6 @@
 
 #include <linux/interrupt.h>
 
-/*
- * Global functions prototype in qla_nvme.c source file.
- */
-extern void qla_nvme_register_hba(scsi_qla_host_t *);
-extern int  qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
-extern void qla_nvme_delete(scsi_qla_host_t *);
-extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
-extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
-    struct req_que *);
-extern void qla24xx_async_gffid_sp_done(void *, int);
-
 /*
  * Global Function Prototypes in qla_init.c source file.
  */
@@ -116,13 +105,15 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
 int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
     void *);
 int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
-
+int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
 /*
  * Global Data in qla_os.c source file.
  */
 extern char qla2x00_version_str[];
 
 extern struct kmem_cache *srb_cachep;
+extern struct kmem_cache *qla_tgt_plogi_cachep;
 
 extern int ql2xlogintimeout;
 extern int qlport_down_retry;
@@ -153,6 +144,7 @@ extern int ql2xfwholdabts;
 extern int ql2xmvasynctoatio;
 extern int ql2xuctrlirq;
 extern int ql2xnvmeenable;
+extern int ql2xautodetectsfp;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -495,6 +487,9 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
 int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
 	struct port_database_24xx *);
 
+extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
+extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -804,6 +799,7 @@ extern char *qdev_state(uint32_t);
 extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 extern int qla82xx_read_temperature(scsi_qla_host_t *);
 extern int qla8044_read_temperature(scsi_qla_host_t *);
+extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct bsg_job *);
@@ -873,4 +869,6 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
 void qlt_remove_target_resources(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
 
+void qla_nvme_cmpl_io(struct srb_iocb *);
+
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index b323a7c71eda241eb9aa9e853f5c14ebd58293b5..bc3db6abc9a0ba3985befc378ae8dd2cb452652a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2816,13 +2816,19 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
 				case MODE_INITIATOR:
 				case MODE_DUAL:
 				default:
+					ql_dbg(ql_dbg_disc, vha, 0x201f,
+					    "%s %d %8phC post %s\n", __func__,
+					    __LINE__, fcport->port_name,
+					    (atomic_read(&fcport->state) ==
+					    FCS_ONLINE) ? "gpdb" : "gnl");
+
 					if (atomic_read(&fcport->state) ==
 					    FCS_ONLINE)
-						break;
-					ql_dbg(ql_dbg_disc, vha, 0x201f,
-					    "%s %d %8phC post gnl\n",
-					    __func__, __LINE__, fcport->port_name);
-					qla24xx_post_gnl_work(vha, fcport);
+						qla24xx_post_gpdb_work(vha,
+						    fcport, PDO_FORCE_ADISC);
+					else
+						qla24xx_post_gnl_work(vha,
+						    fcport);
 					break;
 				}
 			} else { /* fcport->d_id.b24 != ea->id.b24 */
@@ -3080,7 +3086,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
 		GPSC_RSP_SIZE);
 
 	/* GPSC req */
-	memcpy(ct_req->req.gpsc.port_name, fcport->port_name,
+	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
 		WWN_SIZE);
 
 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 072ad1aa55051e7b28d26ec13bd0e5105cb08da9..b5b48ddca9621d82bb9b7b674b8eb9f79f083fbe 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -36,7 +36,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
 static int qla84xx_init_chip(scsi_qla_host_t *);
 static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
     struct event_arg *);
@@ -774,8 +773,7 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
 	return rval;
 }
 
-static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
-    u8 opt)
+int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 {
 	struct qla_work_evt *e;
 
@@ -808,6 +806,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	if (!sp)
 		goto done;
 
+	sp->type = SRB_MB_IOCB;
+	sp->name = "gpdb";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
 	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
 	if (pd == NULL) {
 		ql_log(ql_log_warn, vha, 0xd043,
@@ -816,12 +820,6 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	}
 	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
 
-	sp->type = SRB_MB_IOCB;
-	sp->name = "gpdb";
-	sp->gen1 = fcport->rscn_gen;
-	sp->gen2 = fcport->login_gen;
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
 	mb = sp->u.iocb_cmd.u.mbx.out_mb;
 	mb[0] = MBC_GET_PORT_DATABASE;
 	mb[1] = fcport->loop_id;
@@ -1466,6 +1464,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 				__func__, __LINE__, ea->fcport->port_name);
 			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 			ea->fcport->logout_on_delete = 1;
+			ea->fcport->send_els_logo = 0;
 			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
 		}
 		break;
@@ -2823,6 +2822,147 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
 	return QLA_SUCCESS;
 }
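+/*
+ * PRINT_FIELD() appends _str ('|'-separated after the first match) to the
+ * caller's local string buffer whenever a0->_field has _flag set.
+ */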
 
+#define PRINT_FIELD(_field, _flag, _str) {		\
+	if (a0->_field & _flag) {\
+		if (p) {\
+			strcat(ptr, "|");\
+			ptr++;\
+			leftover--;\
+		} \
+		len = snprintf(ptr, leftover, "%s", _str);	\
+		p = 1;\
+		leftover -= len;\
+		ptr += len; \
+	} \
+}
+
+static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
+{
+#define STR_LEN 64
+	struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	u8 str[STR_LEN], *ptr, p;
+	int leftover, len;
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
+	ql_dbg(ql_dbg_init, vha, 0x015a,
+	    "SFP MFG Name: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
+	ql_dbg(ql_dbg_init, vha, 0x015c,
+	    "SFP Part Name: %s\n", str);
+
+	/* media */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
+	PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
+	ql_dbg(ql_dbg_init, vha, 0x0160,
+	    "SFP Media: %s\n", str);
+
+	/* link length */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
+	ql_dbg(ql_dbg_init, vha, 0x0196,
+	    "SFP Link Length: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
+	ql_dbg(ql_dbg_init, vha, 0x016e,
+	    "SFP FC Link Tech: %s\n", str);
+
+	if (a0->length_km)
+		ql_dbg(ql_dbg_init, vha, 0x016f,
+		    "SFP Distance: %d km\n", a0->length_km);
+	if (a0->length_100m)
+		ql_dbg(ql_dbg_init, vha, 0x0170,
+		    "SFP Distance: %d m\n", a0->length_100m*100);
+	if (a0->length_50um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0189,
+		    "SFP Distance (WL=50um): %d m\n", a0->length_50um_10m * 10);
+	if (a0->length_62um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x018a,
+		    "SFP Distance (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
+	if (a0->length_om4_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0194,
+		    "SFP Distance (OM4): %d m\n", a0->length_om4_10m * 10);
+	if (a0->length_om3_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0195,
+		    "SFP Distance (OM3): %d m\n", a0->length_om3_10m * 10);
+}
+
+
+/*
+ * Return Code:
+ *   QLA_SUCCESS: no action
+ *   QLA_INTERFACE_ERROR: SFP is not present.
+ *   QLA_FUNCTION_FAILED: a new SFP was detected.
+ */
+int
+qla24xx_detect_sfp(scsi_qla_host_t *vha)
+{
+	int rc = QLA_SUCCESS;
+	struct sff_8247_a0 *a;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!AUTO_DETECT_SFP_SUPPORT(vha))
+		goto out;
+
+	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
+	if (rc)
+		goto out;
+
+	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	qla2xxx_print_sfp_info(vha);
+
+	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
+		/* long range */
+		ha->flags.detected_lr_sfp = 1;
+
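+		/* SFPs rated beyond 5 km select the 10 km long-range setting. */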
+		if (a->length_km > 5 || a->length_100m > 50)
+			ha->long_range_distance = LR_DISTANCE_10K;
+		else
+			ha->long_range_distance = LR_DISTANCE_5K;
+
+		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x507b,
+			    "Detected Long Range SFP.\n");
+	} else {
+		/* short range */
+		ha->flags.detected_lr_sfp = 0;
+		if (ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x5084,
+			    "Detected Short Range SFP.\n");
+	}
+
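+	/* During initial bring-up, report QLA_SUCCESS (no action needed). */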
+	if (!vha->flags.init_done)
+		rc = QLA_SUCCESS;
+out:
+	return rc;
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @ha: HA context
@@ -2879,6 +3019,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 			rval = qla2x00_execute_fw(vha, srisc_address);
 			/* Retrieve firmware information. */
 			if (rval == QLA_SUCCESS) {
+				qla24xx_detect_sfp(vha);
+
 				rval = qla2x00_set_exlogins_buffer(vha);
 				if (rval != QLA_SUCCESS)
 					goto failed;
@@ -4609,24 +4751,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			qla2x00_fdmi_register(vha);
 
 		/* Ensure we are logged into the SNS. */
-		if (IS_FWI2_CAPABLE(ha))
-			loop_id = NPH_SNS;
-		else
-			loop_id = SIMPLE_NAME_SERVER;
+		loop_id = NPH_SNS_LID(ha);
 		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
 		    0xfc, mb, BIT_1|BIT_0);
-		if (rval != QLA_SUCCESS) {
+		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_disc, vha, 0x20a1,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
+			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 			return rval;
 		}
-		if (mb[0] != MBS_COMMAND_COMPLETE) {
-			ql_dbg(ql_dbg_disc, vha, 0x20a1,
-			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
-			    "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
-			    mb[2], mb[6], mb[7]);
-			return (QLA_SUCCESS);
-		}
-
 		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
 			if (qla2x00_rft_id(vha)) {
 				/* EMPTY */
@@ -4804,6 +4938,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 				new_fcport->fc4_type = swl[swl_idx].fc4_type;
 
 				new_fcport->nvme_flag = 0;
+				new_fcport->fc4f_nvme = 0;
 				if (vha->flags.nvme_enabled &&
 				    swl[swl_idx].fc4f_nvme) {
 					new_fcport->fc4f_nvme =
@@ -5913,7 +6048,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 
 	if (!status) {
 		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
-
+		qla2x00_configure_hba(vha);
 		spin_lock_irqsave(&ha->vport_slock, flags);
 		list_for_each_entry(vp, &ha->vp_list, list) {
 			if (vp->vp_idx) {
@@ -7804,7 +7939,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 		ha->queue_pair_map[qpair_id] = qpair;
 		qpair->id = qpair_id;
 		qpair->vp_idx = vp_idx;
+		qpair->fw_started = ha->flags.fw_started;
 		INIT_LIST_HEAD(&qpair->hints_list);
+		INIT_LIST_HEAD(&qpair->nvme_done_list);
 		qpair->chip_reset = ha->base_qpair->chip_reset;
 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
 		qpair->enable_explicit_conf =
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a36c485fae50b94be5c569c791b0bb5e5991774a..2f94159186d7cefaa64eae4a20f3f60a5e14d23f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2682,12 +2682,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	uint32_t        *cur_dsd;
 	struct scatterlist *sg;
 	int index;
-	uint16_t tot_dsds;
+	uint16_t cmd_dsds, rsp_dsds;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
-	int loop_iterartion = 0;
 	int entry_count = 1;
+	cont_a64_entry_t *cont_pkt = NULL;
 
 	ct_iocb->entry_type = CT_IOCB_TYPE;
         ct_iocb->entry_status = 0;
@@ -2698,30 +2698,46 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	ct_iocb->vp_index = sp->vha->vp_idx;
 	ct_iocb->comp_status = cpu_to_le16(0);
 
-	ct_iocb->cmd_dsd_count =
-		cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	cmd_dsds = bsg_job->request_payload.sg_cnt;
+	rsp_dsds = bsg_job->reply_payload.sg_cnt;
+
+	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
         ct_iocb->timeout = 0;
-        ct_iocb->rsp_dsd_count =
-		cpu_to_le16(bsg_job->reply_payload.sg_cnt);
-        ct_iocb->rsp_byte_count =
-            cpu_to_le32(bsg_job->reply_payload.payload_len);
+	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
         ct_iocb->cmd_byte_count =
             cpu_to_le32(bsg_job->request_payload.payload_len);
-        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
-        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
-           (bsg_job->request_payload.sg_list)));
-        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
-            (bsg_job->request_payload.sg_list));
 
-	avail_dsds = 1;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+	avail_dsds = 2;
+	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
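+	/* The CT IOCB embeds two DSDs; fill them with the command payload first. */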
 	index = 0;
-	tot_dsds = bsg_job->reply_payload.sg_cnt;
 
-	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
+		dma_addr_t       sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(
+			    vha, ha->req_q_map[0]);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+
+	index = 0;
+
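+	/* Continue with the reply payload, spilling into continuation IOCBs. */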
+	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
 		dma_addr_t       sle_dma;
-		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
@@ -2740,7 +2756,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
-		loop_iterartion++;
 		avail_dsds--;
 	}
         ct_iocb->entry_count = entry_count;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b3b702ef6222cab385bb90d346f9eae679b1a95..9d9668aac6f6c479144a73428fac6049c5a9ac09 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -14,6 +14,8 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/nvme-fc-driver.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
@@ -452,7 +454,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			uint16_t peg_fw_state, nw_interface_link_up;
 			uint16_t nw_interface_signal_detect, sfp_status;
 			uint16_t htbt_counter, htbt_monitor_enable;
-			uint16_t sfp_additonal_info, sfp_multirate;
+			uint16_t sfp_additional_info, sfp_multirate;
 			uint16_t sfp_tx_fault, link_speed, dcbx_status;
 
 			/*
@@ -492,7 +494,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			sfp_status = ((mb[2] & 0x0c00) >> 10);
 			htbt_counter = ((mb[2] & 0x7000) >> 12);
 			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
-			sfp_additonal_info = (mb[6] & 0x0003);
+			sfp_additional_info = (mb[6] & 0x0003);
 			sfp_multirate = ((mb[6] & 0x0004) >> 2);
 			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
 			link_speed = ((mb[6] & 0x0070) >> 4);
@@ -507,9 +509,9 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
 			    sfp_status);
 			ql_log(ql_log_warn, vha, 0x5067,
 			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
-			    "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
+			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
 			    htbt_counter, htbt_monitor_enable,
-			    sfp_additonal_info, sfp_multirate);
+			    sfp_additional_info, sfp_multirate);
 			ql_log(ql_log_warn, vha, 0x5068,
 			    "sfp_tx_fault=0x%x, link_state=0x%x, "
 			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
@@ -799,6 +801,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 
 		vha->flags.management_server_logged_in = 0;
 		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
+
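+		/* Have the DPC thread re-read the SFP now that the link is up. */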
+		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
+			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
@@ -1228,6 +1235,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 			schedule_work(&ha->board_disable);
 		break;
 
+	case MBA_TRANS_INSERT:
+		ql_dbg(ql_dbg_async, vha, 0x5091,
+		    "Transceiver Insertion: %04x\n", mb[1]);
+		break;
+
 	default:
 		ql_dbg(ql_dbg_async, vha, 0x5057,
 		    "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1537,8 +1549,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 	if (!sp)
 		return;
-	bsg_job = sp->u.bsg_job;
-	bsg_reply = bsg_job->reply;
 
 	type = NULL;
 	switch (sp->type) {
@@ -1577,6 +1587,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
 	 * fc payload  to the caller
 	 */
+	bsg_job = sp->u.bsg_job;
+	bsg_reply = bsg_job->reply;
 	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
 
@@ -1823,7 +1835,7 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 	nvme = &sp->u.iocb_cmd;
 
 	if (unlikely(nvme->u.nvme.aen_op))
-		atomic_dec(&sp->vha->nvme_active_aen_cnt);
+		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
 
 	/*
 	 * State flags: Bit 6 and 0.
@@ -1856,17 +1868,42 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 	fd->transferred_length = fd->payload_length -
 	    le32_to_cpu(sts->residual_len);
 
+	/*
+	 * If transport error then Failure (HBA rejects request)
+	 * otherwise transport will handle.
+	 */
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "NVME-%s error - hdl=%x entry-status(%x).\n",
 		    sp->name, sp->handle, sts->entry_status);
 		ret = QLA_FUNCTION_FAILED;
-	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
-		ql_log(ql_log_warn, fcport->vha, 0x5039,
-		    "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-		    sp->name, sp->handle, sts->comp_status,
-		    le32_to_cpu(sts->residual_len), sts->ox_id);
-		ret = QLA_FUNCTION_FAILED;
+	} else {
+		switch (le16_to_cpu(sts->comp_status)) {
+		case CS_COMPLETE:
+			ret = 0;
+			break;
+
+		case CS_ABORTED:
+		case CS_RESET:
+		case CS_PORT_UNAVAILABLE:
+		case CS_PORT_LOGGED_OUT:
+		case CS_PORT_BUSY:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			fd->transferred_length = fd->payload_length;
+			ret = QLA_ABORTED;
+			break;
+
+		default:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
 	}
 	sp->done(sp, ret);
 }
@@ -2827,8 +2864,8 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp->done(sp, 0);
 }
 
-void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
-    struct req_que *req)
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
+    struct pt_ls4_request *pkt, struct req_que *req)
 {
 	srb_t *sp;
 	const char func[] = "LS4_IOCB";
@@ -3132,7 +3169,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
 	unsigned long flags;
-	uint32_t stat = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -3146,19 +3182,11 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	vha = pci_get_drvdata(ha->pdev);
-	/*
-	 * Use host_status register to check to PCI disconnection before we
-	 * we process the response queue.
-	 */
-	stat = RD_REG_DWORD(&reg->host_status);
-	if (qla2x00_check_reg32_for_disconnect(vha, stat))
-		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
-out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
@@ -3429,7 +3457,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
 		     ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7c6d1a40401121dce41279d32adce5104147b203..99502fa908103f39d10cd1930d994f9067f445a4 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -54,6 +54,10 @@ static struct rom_cmd {
 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
 	{ MBC_GET_RETRY_COUNT },
 	{ MBC_TRACE_CONTROL },
+	{ MBC_INITIALIZE_MULTIQ },
+	{ MBC_IOCB_COMMAND_A64 },
+	{ MBC_GET_ADAPTER_LOOP_ID },
+	{ MBC_READ_SFP },
 };
 
 static int is_rom_cmd(uint16_t cmd)
@@ -102,7 +106,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	uint16_t __iomem *optr;
 	uint32_t	cnt;
 	uint32_t	mboxes;
-	uint16_t __iomem *mbx_reg;
 	unsigned long	wait_time;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -486,21 +489,24 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
 mbx_done:
 	if (rval) {
-		ql_dbg(ql_dbg_disc, base_vha, 0x1020,
-		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
-		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
-
+		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
+			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+			    dev_name(&ha->pdev->dev), 0x1020+0x800,
+			    vha->host_no);
+			mboxes = mcp->in_mb;
+			cnt = 4;
+			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
+				if (mboxes & BIT_0) {
+					pr_cont(" mb[%u]=%x", i, mcp->mb[i]);
+					cnt--;
+				}
+			pr_cont(" cmd=%x ****\n", command);
+		}
 		ql_dbg(ql_dbg_mbx, vha, 0x1198,
-		    "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
+		    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
 		    RD_REG_DWORD(&reg->isp24.host_status),
-		    ha->fw_dump_cap_flags,
 		    RD_REG_DWORD(&reg->isp24.ictrl),
 		    RD_REG_DWORD(&reg->isp24.istatus));
-
-		mbx_reg = &reg->isp24.mailbox0;
-		for (i = 0; i < 6; i++)
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199,
-			    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
 	} else {
 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
 	}
@@ -561,6 +567,28 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
 
 #define	EXTENDED_BB_CREDITS	BIT_0
 #define	NVME_ENABLE_FLAG	BIT_3
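+/*
+ * Helpers for the EXECUTE FIRMWARE mb[4] long-range bits: BIT_0 plus the
+ * LR distance taken from the detected SFP or from NVRAM, respectively.
+ */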
+static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
+
+	return mb4;
+}
+
+static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		struct nvram_81xx *nv = ha->nvram;
+
+		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
+	}
+
+	return mb4;
+}
 
 /*
  * qla2x00_execute_fw
@@ -595,17 +623,44 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
+		mcp->mb[4] = 0;
+		ha->flags.using_lr_setting = 0;
 		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
 		    IS_QLA27XX(ha)) {
-			struct nvram_81xx *nv = ha->nvram;
-			mcp->mb[4] = (nv->enhanced_features &
-			    EXTENDED_BB_CREDITS);
-		} else
-			mcp->mb[4] = 0;
+			if (ql2xautodetectsfp) {
+				if (ha->flags.detected_lr_sfp) {
+					mcp->mb[4] |=
+					    qla25xx_set_sfp_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			} else {
+				struct nvram_81xx *nv = ha->nvram;
+				/* set LR distance if specified in nvram */
+				if (nv->enhanced_features &
+				    NEF_LR_DIST_ENABLE) {
+					mcp->mb[4] |=
+					    qla25xx_set_nvr_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			}
+		}
 
 		if (ql2xnvmeenable && IS_QLA27XX(ha))
 			mcp->mb[4] |= NVME_ENABLE_FLAG;
 
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			struct nvram_81xx *nv = ha->nvram;
+			/* set minimum speed if specified in nvram */
+			if (nv->min_link_speed >= 2 &&
+			    nv->min_link_speed <= 5) {
+				mcp->mb[4] |= BIT_4;
+				mcp->mb[11] = nv->min_link_speed;
+				mcp->out_mb |= MBX_11;
+				mcp->in_mb |= BIT_5;
+				vha->min_link_speed_feat = nv->min_link_speed;
+			}
+		}
+
 		if (ha->flags.exlogins_enabled)
 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
 
@@ -613,7 +668,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
 
 		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
-		mcp->in_mb |= MBX_1;
+		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
 	} else {
 		mcp->mb[1] = LSW(risc_addr);
 		mcp->out_mb |= MBX_1;
@@ -632,12 +687,30 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 	} else {
 		if (IS_FWI2_CAPABLE(ha)) {
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
-			    "Done exchanges=%x.\n", mcp->mb[1]);
-		} else {
-			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
-			    "Done %s.\n", __func__);
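+			/* mb[2]/mb[3] carry the firmware ability mask returned by EXECUTE FIRMWARE. */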
+			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+			ql_dbg(ql_dbg_mbx, vha, 0x119a,
+			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+			ql_dbg(ql_dbg_mbx, vha, 0x1027,
+			    "exchanges=%x.\n", mcp->mb[1]);
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				ha->max_speed_sup = mcp->mb[2] & BIT_0;
+				ql_dbg(ql_dbg_mbx, vha, 0x119b,
+				    "Maximum speed supported=%s.\n",
+				    ha->max_speed_sup ? "32Gbps" : "16Gbps");
+				if (vha->min_link_speed_feat) {
+					ha->min_link_speed = mcp->mb[5];
+					ql_dbg(ql_dbg_mbx, vha, 0x119c,
+					    "Minimum speed set=%s.\n",
+					    mcp->mb[5] == 5 ? "32Gbps" :
+					    mcp->mb[5] == 4 ? "16Gbps" :
+					    mcp->mb[5] == 3 ? "8Gbps" :
+					    mcp->mb[5] == 2 ? "4Gbps" :
+					    "unknown");
+				}
+			}
 		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+		    "Done.\n");
 	}
 
 	return rval;
@@ -947,20 +1020,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 			    "%s: Firmware supports Exchange Offload 0x%x\n",
 			    __func__, ha->fw_attributes_h);
 
-		/* bit 26 of fw_attributes */
-		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
-			struct init_cb_24xx *icb;
-
-			icb = (struct init_cb_24xx *)ha->init_cb;
-			/*
-			 * fw supports nvme and driver load
-			 * parameter requested nvme
-			 */
+		/*
+		 * FW supports nvme and driver load parameter requested nvme.
+		 * BIT 26 of fw_attributes indicates NVMe support.
+		 */
+		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
 			vha->flags.nvme_enabled = 1;
-			icb->firmware_options_2 &= cpu_to_le32(~0xf);
-			ha->zio_mode = 0;
-			ha->zio_timer = 0;
-		}
 
 	}
 
@@ -1673,7 +1738,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
 	} else {
-		/*EMPTY*/
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119d,
+				    "Invalid SFP/Validation Failed\n");
+		}
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
 		    "Done %s.\n", __func__);
 	}
@@ -1878,6 +1947,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
 	    "Entered %s.\n", __func__);
@@ -1906,7 +1976,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
 	} else {
-		/*EMPTY*/
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119e,
+				    "Invalid SFP/Validation Failed\n");
+		}
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
 		    "Done %s.\n", __func__);
 	}
@@ -3689,7 +3763,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 				if (qla_ini_mode_enabled(vha) &&
 				    ha->flags.fawwpn_enabled &&
 				    (rptid_entry->u.f1.flags &
-				     VP_FLAGS_NAME_VALID)) {
+				     BIT_6)) {
 					memcpy(vha->port_name,
 					    rptid_entry->u.f1.port_name,
 					    WWN_SIZE);
@@ -4590,6 +4664,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
+		    mcp->mb[1] == 0x22)
+			/* sfp is not there */
+			rval = QLA_INTERFACE_ERROR;
 	} else {
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
 		    "Done %s.\n", __func__);
@@ -5817,7 +5895,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 
 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
 	    dd_buf, size, DMA_FROM_DEVICE);
-	if (!dd_dma) {
+	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
@@ -6085,3 +6163,108 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
 done:
 	return rval;
 }
+
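+/*
+ * MBC_GET_SET_ZIO_THRESHOLD: mb[1]=1 writes the threshold passed in mb[2],
+ * mb[1]=0 reads the current threshold back in mb[2].
+ */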
+int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0 , sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(1);
+	mcp->mb[2] = cpu_to_le16(value);
+	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
+
+	return rval;
+}
+
+int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(0);
+	mcp->out_mb = MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval == QLA_SUCCESS)
+		*value = mc.mb[2];
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+
+	return rval;
+}
+
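+/*
+ * Read SFP_DEV_SIZE bytes of SFP data in SFP_BLOCK_SIZE chunks: the first
+ * four blocks from device address 0xa0 and the rest from 0xa2, copying up
+ * to count bytes into buf when a buffer is supplied.
+ */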
+int
+qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t iter, addr, offset;
+	dma_addr_t phys_addr;
+	int rval, c;
+	u8 *sfp_data;
+
+	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
+	addr = 0xa0;
+	phys_addr = ha->sfp_data_dma;
+	sfp_data = ha->sfp_data;
+	offset = c = 0;
+
+	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
+		if (iter == 4) {
+			/* Skip to next device address. */
+			addr = 0xa2;
+			offset = 0;
+		}
+
+		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
+		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x706d,
+			    "Unable to read SFP data (%x/%x/%x).\n", rval,
+			    addr, offset);
+
+			return rval;
+		}
+
+		if (buf && (c < count)) {
+			u16 sz;
+
+			if ((count - c) >= SFP_BLOCK_SIZE)
+				sz = SFP_BLOCK_SIZE;
+			else
+				sz = count - c;
+
+			memcpy(buf, sfp_data, sz);
+			buf += SFP_BLOCK_SIZE;
+			c += sz;
+		}
+		phys_addr += SFP_BLOCK_SIZE;
+		sfp_data  += SFP_BLOCK_SIZE;
+		offset += SFP_BLOCK_SIZE;
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f0605cd196fb4e84c35bf4f59879af5b268924a5..c0f8f6c17b793384cd9f9c70ef34f8ec945642e7 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
-	wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
 	    10*HZ);
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
@@ -187,6 +187,11 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
 		!(ha->current_topology & ISP_CFG_F)) {
 		vha->vp_err_state =  VP_ERR_PORTDWN;
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+		ql_dbg(ql_dbg_taskm, vha, 0x800b,
+		    "%s skip enable. loop_state %x topo %x\n",
+		    __func__, base_vha->loop_state.counter,
+		    ha->current_topology);
+
 		goto enable_failed;
 	}
 
@@ -759,11 +764,18 @@ static void qla_do_work(struct work_struct *work)
 	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
 	struct scsi_qla_host *vha;
 	struct qla_hw_data *ha = qpair->hw;
+	struct srb_iocb	*nvme, *nxt_nvme;
 
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	qla24xx_process_response_queue(vha, qpair->rsp);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
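+	/* Complete NVMe requests deferred to this qpair's nvme_done_list. */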
+	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
+		    u.nvme.entry) {
+		list_del_init(&nvme->u.nvme.entry);
+		qla_nvme_cmpl_io(nvme);
+	}
 }
 
 /* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 10b742d27e16467e4bd06f40bd030dd60a766b61..e23a3d4c36f39b81f2594b95582018f16a59931b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1819,6 +1819,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 	if (!sp)
 		goto done;
 
+	sp->type = SRB_FXIOCB_DCMD;
+	sp->name = "fxdisc";
+	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+
 	fdisc = &sp->u.iocb_cmd;
 	switch (fx_type) {
 	case FXDISC_GET_CONFIG_INFO:
@@ -1920,9 +1924,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 			goto done_unmap_req;
 	}
 
-	sp->type = SRB_FXIOCB_DCMD;
-	sp->name = "fxdisc";
-	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
 	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
 	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
 	sp->done = qla2x00_fxdisc_sp_done;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index f3710a75fe1fc0eb95c57284014a1930223afcc7..1f59e7a74c7b7b9dc7db660e6114420bc90ce8b7 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -5,7 +5,6 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_nvme.h"
-#include "qla_def.h"
 #include <linux/scatterlist.h>
 #include <linux/delay.h>
 #include <linux/nvme.h>
@@ -15,7 +14,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport;
 
 static void qla_nvme_unregister_remote_port(struct work_struct *);
 
-int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
 {
 	struct nvme_rport *rport;
 	int ret;
@@ -61,8 +60,8 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
 	rport->req.port_id = fcport->d_id.b24;
 
 	ql_log(ql_log_info, vha, 0x2102,
-	    "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
-	    __func__, rport->req.port_name, rport->req.node_name,
+	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
+	    __func__, rport->req.node_name, rport->req.port_name,
 	    rport->req.port_id);
 
 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
@@ -76,16 +75,14 @@ int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 	fcport->nvme_remote_port->private = fcport;
 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
-	atomic_set(&fcport->nvme_ref_count, 1);
-	init_waitqueue_head(&fcport->nvme_waitQ);
 	rport->fcport = fcport;
 	list_add_tail(&rport->list, &vha->nvme_rport_list);
 	return 0;
 }
 
 /* Allocate a queue for NVMe traffic */
-static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx,
-    u16 qsize, void **handle)
+static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+    unsigned int qidx, u16 qsize, void **handle)
 {
 	struct scsi_qla_host *vha;
 	struct qla_hw_data *ha;
@@ -157,6 +154,16 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
 	qla2x00_rel_sp(sp);
 }
 
+void qla_nvme_cmpl_io(struct srb_iocb *nvme)
+{
+	srb_t *sp;
+	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+
+	sp = container_of(nvme, srb_t, u.iocb_cmd);
+	fd->done(fd);
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
 static void qla_nvme_sp_done(void *ptr, int res)
 {
 	srb_t *sp = ptr;
@@ -172,13 +179,14 @@ static void qla_nvme_sp_done(void *ptr, int res)
 	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
 		goto rel;
 
-	if (unlikely(nvme->u.nvme.comp_status || res))
-		fd->status = -EINVAL;
+	if (unlikely(res == QLA_FUNCTION_FAILED))
+		fd->status = NVME_SC_FC_TRANSPORT_ERROR;
 	else
 		fd->status = 0;
 
 	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
-	fd->done(fd);
+	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
+	return;
 rel:
 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
@@ -193,13 +201,11 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
 	struct qla_hw_data *ha = fcport->vha->hw;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (rval != QLA_SUCCESS)
-		ql_log(ql_log_warn, fcport->vha, 0x2125,
-		    "%s: failed to abort LS command for SP:%p rval=%x\n",
-		    __func__, sp, rval);
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
-	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+	    "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
 }
 
 static void qla_nvme_ls_complete(struct work_struct *work)
@@ -214,7 +220,7 @@ static void qla_nvme_ls_complete(struct work_struct *work)
 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
-	fc_port_t *fcport = (fc_port_t *)rport->private;
+	fc_port_t *fcport = rport->private;
 	struct srb_iocb   *nvme;
 	struct nvme_private *priv = fd->private;
 	struct scsi_qla_host *vha;
@@ -236,7 +242,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	sp->name = "nvme_ls";
 	sp->done = qla_nvme_sp_ls_done;
 	atomic_set(&sp->ref_count, 1);
-	init_waitqueue_head(&sp->nvme_ls_waitQ);
 	nvme = &sp->u.iocb_cmd;
 	priv->sp = sp;
 	priv->fd = fd;
@@ -258,7 +263,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 		ql_log(ql_log_warn, vha, 0x700e,
 		    "qla2x00_start_sp failed = %d\n", rval);
 		atomic_dec(&sp->ref_count);
-		wake_up(&sp->nvme_ls_waitQ);
+		wake_up(&sp->nvme_ls_waitq);
 		return rval;
 	}
 
@@ -276,20 +281,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
 	struct qla_hw_data *ha = fcport->vha->hw;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (!rval)
-		ql_log(ql_log_warn, fcport->vha, 0x2127,
-		    "%s: failed to abort command for SP:%p rval=%x\n",
-		    __func__, sp, rval);
 
-	ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
-	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+	ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
+	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
 }
 
 static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
 {
 	struct scsi_qla_host *vha = lport->private;
 	unsigned long flags;
-	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+	struct qla_qpair *qpair = hw_queue_handle;
 
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -310,6 +313,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	uint16_t	avail_dsds;
 	uint32_t	*cur_dsd;
 	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_qpair *qpair = sp->qpair;
@@ -318,13 +322,15 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
 	uint32_t        rval = QLA_SUCCESS;
 
-	/* Setup qpair pointers */
-	req = qpair->req;
 	tot_dsds = fd->sg_cnt;
 
 	/* Acquire qpair specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
+	/* Setup qpair pointers */
+	req = qpair->req;
+	rsp = qpair->rsp;
+
 	/* Check for room in outstanding command list. */
 	handle = req->current_outstanding_cmd;
 	for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -359,7 +365,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
 			nvme->u.nvme.aen_op = 1;
-			atomic_inc(&vha->nvme_active_aen_cnt);
+			atomic_inc(&vha->hw->nvme_active_aen_cnt);
 		}
 	}
 
@@ -472,6 +478,11 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	/* Set chip new ring index. */
 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
 
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
 queuing_error:
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 	return rval;
@@ -487,7 +498,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	struct scsi_qla_host *vha;
 	int rval = QLA_FUNCTION_FAILED;
 	srb_t *sp;
-	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+	struct qla_qpair *qpair = hw_queue_handle;
 	struct nvme_private *priv;
 
 	if (!fd) {
@@ -496,7 +507,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	}
 
 	priv = fd->private;
-	fcport = (fc_port_t *)rport->private;
+	fcport = rport->private;
 	if (!fcport) {
 		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
 		return rval;
@@ -512,7 +523,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		return -EIO;
 
 	atomic_set(&sp->ref_count, 1);
-	init_waitqueue_head(&sp->nvme_ls_waitQ);
+	init_waitqueue_head(&sp->nvme_ls_waitq);
 	priv->sp = sp;
 	sp->type = SRB_NVME_CMD;
 	sp->name = "nvme_cmd";
@@ -526,7 +537,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		ql_log(ql_log_warn, vha, 0x212d,
 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
 		atomic_dec(&sp->ref_count);
-		wake_up(&sp->nvme_ls_waitQ);
+		wake_up(&sp->nvme_ls_waitq);
 		return -EIO;
 	}
 
@@ -537,12 +548,10 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
 {
 	struct scsi_qla_host *vha = lport->private;
 
-	atomic_dec(&vha->nvme_ref_count);
-	wake_up_all(&vha->nvme_waitQ);
-
 	ql_log(ql_log_info, vha, 0x210f,
 	    "localport delete of %p completed.\n", vha->nvme_local_port);
 	vha->nvme_local_port = NULL;
+	complete(&vha->nvme_del_done);
 }
 
 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
@@ -550,11 +559,9 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 	fc_port_t *fcport;
 	struct nvme_rport *r_port, *trport;
 
-	fcport = (fc_port_t *)rport->private;
+	fcport = rport->private;
 	fcport->nvme_remote_port = NULL;
 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-	atomic_dec(&fcport->nvme_ref_count);
-	wake_up_all(&fcport->nvme_waitQ);
 
 	list_for_each_entry_safe(r_port, trport,
 	    &fcport->vha->nvme_rport_list, list) {
@@ -564,6 +571,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 		}
 	}
 	kfree(r_port);
+	complete(&fcport->nvme_del_done);
 
 	ql_log(ql_log_info, fcport->vha, 0x2110,
 	    "remoteport_delete of %p completed.\n", fcport);
@@ -594,7 +602,7 @@ static int qla_nvme_wait_on_command(srb_t *sp)
 {
 	int ret = QLA_SUCCESS;
 
-	wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1),
+	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
 	    NVME_ABORT_POLLING_PERIOD*HZ);
 
 	if (atomic_read(&sp->ref_count) > 1)
@@ -606,12 +614,11 @@ static int qla_nvme_wait_on_command(srb_t *sp)
 static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
 {
 	int ret = QLA_SUCCESS;
+	int timeout;
 
-	wait_event_timeout(fcport->nvme_waitQ,
-	    atomic_read(&fcport->nvme_ref_count),
-	    NVME_ABORT_POLLING_PERIOD*HZ);
-
-	if (atomic_read(&fcport->nvme_ref_count)) {
+	timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
+	    msecs_to_jiffies(2000));
+	if (!timeout) {
 		ret = QLA_FUNCTION_FAILED;
 		ql_log(ql_log_info, fcport->vha, 0x2111,
 		    "timed out waiting for fcport=%p to delete\n", fcport);
@@ -620,50 +627,14 @@ static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
 	return ret;
 }
 
-void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
+void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
 {
 	int rval;
 
 	rval = ha->isp_ops->abort_command(sp);
-	if (!rval) {
-		if (!qla_nvme_wait_on_command(sp))
-			ql_log(ql_log_warn, NULL, 0x2112,
-			    "nvme_wait_on_command timed out waiting on sp=%p\n",
-			    sp);
-	}
-}
-
-static void qla_nvme_abort_all(fc_port_t *fcport)
-{
-	int que, cnt;
-	unsigned long flags;
-	srb_t *sp;
-	struct qla_hw_data *ha = fcport->vha->hw;
-	struct req_que *req;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (que = 0; que < ha->max_req_queues; que++) {
-		req = ha->req_q_map[que];
-		if (!req)
-			continue;
-		if (!req->outstanding_cmds)
-			continue;
-		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
-			sp = req->outstanding_cmds[cnt];
-			if ((sp) && ((sp->type == SRB_NVME_CMD) ||
-			    (sp->type == SRB_NVME_LS)) &&
-				(sp->fcport == fcport)) {
-				atomic_inc(&sp->ref_count);
-				spin_unlock_irqrestore(&ha->hardware_lock,
-				    flags);
-				qla_nvme_abort(ha, sp);
-				spin_lock_irqsave(&ha->hardware_lock, flags);
-				req->outstanding_cmds[cnt] = NULL;
-				sp->done(sp, 1);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (!rval && !qla_nvme_wait_on_command(sp))
+		ql_log(ql_log_warn, NULL, 0x2112,
+		    "nvme_wait_on_command timed out waiting on sp=%p\n", sp);
 }
 
 static void qla_nvme_unregister_remote_port(struct work_struct *work)
@@ -675,18 +646,23 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
 	if (!IS_ENABLED(CONFIG_NVME_FC))
 		return;
 
+	ql_log(ql_log_warn, NULL, 0x2112,
+	    "%s: unregister remoteport on %p\n", __func__, fcport);
+
 	list_for_each_entry_safe(rport, trport,
 	    &fcport->vha->nvme_rport_list, list) {
 		if (rport->fcport == fcport) {
 			ql_log(ql_log_info, fcport->vha, 0x2113,
 			    "%s: fcport=%p\n", __func__, fcport);
+			init_completion(&fcport->nvme_del_done);
 			nvme_fc_unregister_remoteport(
 			    fcport->nvme_remote_port);
+			qla_nvme_wait_on_rport_del(fcport);
 		}
 	}
 }
 
-void qla_nvme_delete(scsi_qla_host_t *vha)
+void qla_nvme_delete(struct scsi_qla_host *vha)
 {
 	struct nvme_rport *rport, *trport;
 	fc_port_t *fcport;
@@ -701,12 +677,13 @@ void qla_nvme_delete(scsi_qla_host_t *vha)
 		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
 		    __func__, fcport);
 
+		init_completion(&fcport->nvme_del_done);
 		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
 		qla_nvme_wait_on_rport_del(fcport);
-		qla_nvme_abort_all(fcport);
 	}
 
 	if (vha->nvme_local_port) {
+		init_completion(&vha->nvme_del_done);
 		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
 		if (nv_ret == 0)
 			ql_log(ql_log_info, vha, 0x2116,
@@ -715,10 +692,12 @@ void qla_nvme_delete(scsi_qla_host_t *vha)
 		else
 			ql_log(ql_log_info, vha, 0x2115,
 			    "Unregister of localport failed\n");
+		wait_for_completion_timeout(&vha->nvme_del_done,
+		    msecs_to_jiffies(5000));
 	}
 }
 
-void qla_nvme_register_hba(scsi_qla_host_t *vha)
+void qla_nvme_register_hba(struct scsi_qla_host *vha)
 {
 	struct nvme_fc_port_template *tmpl;
 	struct qla_hw_data *ha;
@@ -744,8 +723,8 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha)
 	pinfo.port_id = vha->d_id.b24;
 
 	ql_log(ql_log_info, vha, 0xffff,
-	    "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
-	    pinfo.port_name, pinfo.node_name, pinfo.port_id);
+	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
 	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
 
 	ret = nvme_fc_register_localport(&pinfo, tmpl,
@@ -755,7 +734,5 @@ void qla_nvme_register_hba(scsi_qla_host_t *vha)
 		    "register_localport failed: ret=%x\n", ret);
 		return;
 	}
-	atomic_set(&vha->nvme_ref_count, 1);
 	vha->nvme_local_port->private = vha;
-	init_waitqueue_head(&vha->nvme_waitQ);
 }
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index dfe56f207b28dc540e5549c8565ab36df9cc7d8b..7f05fa1c77dbd1c842bd16cf3c5f129665a42b7b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -12,12 +12,18 @@
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/nvme-fc-driver.h>
 
+#include "qla_def.h"
+
 #define NVME_ATIO_CMD_OFF 32
 #define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
 #define Q2T_NVME_NUM_TAGS 2048
 #define QLA_MAX_FC_SEGMENTS 64
 
+struct scsi_qla_host;
+struct qla_hw_data;
+struct req_que;
 struct srb;
+
 struct nvme_private {
 	struct srb	*sp;
 	struct nvmefc_ls_req *fd;
@@ -129,4 +135,15 @@ struct pt_ls4_rx_unsol {
 	uint32_t desc_len;
 	uint32_t payload[3];
 };
+
+/*
+ * Global functions prototype in qla_nvme.c source file.
+ */
+void qla_nvme_register_hba(struct scsi_qla_host *);
+int  qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
+void qla_nvme_delete(struct scsi_qla_host *);
+void qla_nvme_abort(struct qla_hw_data *, struct srb *sp);
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
+    struct req_que *);
+void qla24xx_async_gffid_sp_done(void *, int);
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index df57655779edc0f047f65299bba126272567a224..5b2437a5ea440a5d3a6836115bcf444595d8f7ef 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -113,12 +113,12 @@ MODULE_PARM_DESC(ql2xfdmienable,
 		"Enables FDMI registrations. "
 		"0 - no FDMI. Default is 1 - perform FDMI.");
 
-#define MAX_Q_DEPTH	32
+#define MAX_Q_DEPTH	64
 static int ql2xmaxqdepth = MAX_Q_DEPTH;
 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to set for each LUN. "
-		"Default is 32.");
+		"Default is 64.");
 
 #if (IS_ENABLED(CONFIG_NVME_FC))
 int ql2xenabledif;
@@ -200,7 +200,7 @@ MODULE_PARM_DESC(ql2xgffidenable,
 		"Enables GFF_ID checks of port type. "
 		"Default is 0 - Do not use GFF_ID information.");
 
-int ql2xasynctmfenable;
+int ql2xasynctmfenable = 1;
 module_param(ql2xasynctmfenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xasynctmfenable,
 		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
@@ -262,6 +262,12 @@ MODULE_PARM_DESC(ql2xmvasynctoatio,
 		"0 (Default). Do not move IOCBs"
 		"1 - Move IOCBs.");
 
+int ql2xautodetectsfp = 1;
+module_param(ql2xautodetectsfp, int, 0444);
+MODULE_PARM_DESC(ql2xautodetectsfp,
+		 "Detect SFP range and set appropriate distance.\n"
+		 "1 (Default): Enable\n");
+
 /*
  * SCSI host template entry points
  */
@@ -379,6 +385,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
 	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
 	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
 	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+	INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
 	ha->base_qpair->enable_class_2 = ql2xenableclass2;
 	/* init qpair to this cpu. Will adjust at run time. */
 	qla_cpu_update(rsp->qpair, smp_processor_id());
@@ -710,7 +717,7 @@ qla2x00_sp_free_dma(void *ptr)
 	}
 
 end:
-	if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
+	if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
 		CMD_SP(cmd) = NULL;
 		qla2x00_rel_sp(sp);
 	}
@@ -735,7 +742,7 @@ qla2x00_sp_compl(void *ptr, int res)
 	if (!atomic_dec_and_test(&sp->ref_count))
 		return;
 
-	qla2x00_sp_free_dma(sp);
+	sp->free(sp);
 	cmd->scsi_done(cmd);
 }
 
@@ -807,7 +814,7 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
 	if (!atomic_dec_and_test(&sp->ref_count))
 		return;
 
-	qla2xxx_qpair_sp_free_dma(sp);
+	sp->free(sp);
 	cmd->scsi_done(cmd);
 }
 
@@ -928,7 +935,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2x00_sp_free_dma(sp);
+	sp->free(sp);
 
 qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1017,7 +1024,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
 	return 0;
 
 qc24_host_busy_free_sp:
-	qla2xxx_qpair_sp_free_dma(sp);
+	sp->free(sp);
 
 qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1134,7 +1141,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	qla2x00_mark_all_devices_lost(vha, 0);
 
-	wait_event(vha->fcport_waitQ, test_fcport_count(vha));
+	wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
 }
 
 /*
@@ -1715,8 +1722,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
 				if (sp->cmd_type == TYPE_SRB) {
-					if ((sp->type == SRB_NVME_CMD) ||
-					    (sp->type == SRB_NVME_LS)) {
+					if (sp->type == SRB_NVME_CMD ||
+					    sp->type == SRB_NVME_LS) {
 						sp_get(sp);
 						spin_unlock_irqrestore(
 						    &ha->hardware_lock, flags);
@@ -1725,6 +1732,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 						    &ha->hardware_lock, flags);
 					} else if (GET_CMD_SP(sp) &&
 					    !ha->flags.eeh_busy &&
+					    (!test_bit(ABORT_ISP_ACTIVE,
+						&vha->dpc_flags)) &&
 					    (sp->type == SRB_SCSI_CMD)) {
 						/*
 						 * Don't abort commands in
@@ -2751,6 +2760,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	spin_lock_init(&ha->tgt.sess_lock);
 	spin_lock_init(&ha->tgt.atio_lock);
 
+	atomic_set(&ha->nvme_active_aen_cnt, 0);
 
 	/* Clear our data area */
 	ha->bars = bars;
@@ -3168,7 +3178,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ha->mqenable) {
 		bool mq = false;
 		bool startit = false;
-		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
 
 		if (QLA_TGT_MODE_ENABLED()) {
 			mq = true;
@@ -3328,6 +3338,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
 		return -ENODEV;
 
+	if (ha->flags.detected_lr_sfp) {
+		ql_log(ql_log_info, base_vha, 0xffff,
+		    "Reset chip to pick up LR SFP setting\n");
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+	}
+
 	return 0;
 
 probe_init_failed:
@@ -3383,12 +3400,22 @@ qla2x00_shutdown(struct pci_dev *pdev)
 	scsi_qla_host_t *vha;
 	struct qla_hw_data  *ha;
 
-	if (!atomic_read(&pdev->enable_cnt))
-		return;
-
 	vha = pci_get_drvdata(pdev);
 	ha = vha->hw;
 
+	ql_log(ql_log_info, vha, 0xfffa,
+		"Adapter shutdown\n");
+
+	/*
+	 * Prevent future board_disable and wait
+	 * until any pending board_disable has completed.
+	 */
+	set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
+	if (!atomic_read(&pdev->enable_cnt))
+		return;
+
 	/* Notify ISPFX00 firmware */
 	if (IS_QLAFX00(ha))
 		qlafx00_driver_shutdown(vha, 20);
@@ -3419,8 +3446,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
 
 	qla2x00_free_fw_dump(ha);
 
-	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
+	ql_log(ql_log_info, vha, 0xfffe,
+		"Adapter shut down successfully.\n");
 }
 
 /* Deletes all the virtual ports for a given ha */
@@ -4006,8 +4034,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 		    "loop_id_map=%p.\n", ha->loop_id_map);
 	}
 
+	ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
+	    SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
+	if (!ha->sfp_data) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+		    "Unable to allocate memory for SFP read-data.\n");
+		goto fail_sfp_data;
+	}
+
 	return 0;
 
+fail_sfp_data:
+	kfree(ha->loop_id_map);
 fail_loop_id_map:
 	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
@@ -4345,7 +4383,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 		ha->ct_sns, ha->ct_sns_dma);
 
 	if (ha->sfp_data)
-		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
+		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
+		    ha->sfp_data_dma);
 
 	if (ha->ms_iocb)
 		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
@@ -4638,9 +4677,10 @@ static
 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
 	unsigned long flags;
-	fc_port_t *fcport =  NULL;
+	fc_port_t *fcport =  NULL, *tfcp;
 	struct qlt_plogi_ack_t *pla =
 	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
+	uint8_t free_fcport = 0;
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4655,6 +4695,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 			pla->ref_count--;
 		}
 	} else {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
 		if (fcport) {
 			fcport->d_id = e->u.new_sess.id;
@@ -4664,6 +4705,29 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 
 			memcpy(fcport->port_name, e->u.new_sess.port_name,
 			    WWN_SIZE);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+				   "%s %8phC mem alloc fail.\n",
+				   __func__, e->u.new_sess.port_name);
+
+			if (pla)
+				kmem_cache_free(qla_tgt_plogi_cachep, pla);
+			return;
+		}
+
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+		/* search again to make sure no one else got ahead */
+		tfcp = qla2x00_find_fcport_by_wwpn(vha,
+		    e->u.new_sess.port_name, 1);
+		if (tfcp) {
+			/* this should rarely happen */
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
+			    __func__, tfcp->port_name, tfcp->disc_state,
+			    tfcp->fw_login_state);
+
+			free_fcport = 1;
+		} else {
 			list_add_tail(&fcport->list, &vha->vp_fcports);
 
 			if (pla) {
@@ -4681,6 +4745,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		else
 			qla24xx_async_gnl(vha, fcport);
 	}
+
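+	/* Someone else added this WWPN while the lock was dropped; discard ours. */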
+	if (free_fcport) {
+		qla2x00_free_fcport(fcport);
+		if (pla)
+			kmem_cache_free(qla_tgt_plogi_cachep, pla);
+	}
 }
 
 void
@@ -5493,6 +5563,13 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 	ql_log(ql_log_warn, base_vha, 0x015b,
 	    "Disabling adapter.\n");
 
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, base_vha, 0xfffc,
+		    "PCI device disabled, no action req for PCI error=%lx\n",
+		    base_vha->pci_flags);
+		return;
+	}
+
 	qla2x00_wait_for_sess_deletion(base_vha);
 
 	set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -5687,6 +5764,16 @@ qla2x00_do_dpc(void *data)
 			}
 		}
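+		/*
+		 * A link-up AEN flagged a possible SFP change: re-read the SFP
+		 * and schedule an ISP abort if the long-range setting differs.
+		 */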
 
+		if (test_and_clear_bit(DETECT_SFP_CHANGE,
+			&base_vha->dpc_flags) &&
+		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
+			qla24xx_detect_sfp(base_vha);
+
+			if (ha->flags.detected_lr_sfp !=
+			    ha->flags.using_lr_setting)
+				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		}
+
 		if (test_and_clear_bit(ISP_ABORT_NEEDED,
 						&base_vha->dpc_flags)) {
 
@@ -5828,6 +5915,17 @@ qla2x00_do_dpc(void *data)
 			mutex_unlock(&ha->mq_lock);
 		}
 
+		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0xffffff,
+			    "nvme: SET ZIO Activity exchange threshold to %d.\n",
+			    ha->nvme_last_rptd_aen);
+			if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+				ql_log(ql_log_info, base_vha, 0xffffff,
+				    "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+				    ha->nvme_last_rptd_aen);
+			}
+		}
+
 		if (!IS_QLAFX00(ha))
 			qla2x00_do_dpc_all_vps(base_vha);
 
@@ -6025,12 +6123,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
 	 * FC-NVME
 	 * see if the active AEN count has changed from what was last reported.
 	 */
-	if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) {
-		vha->nvme_last_rptd_aen =
-		    atomic_read(&vha->nvme_active_aen_cnt);
+	if (!vha->vp_idx &&
+		atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
+		ha->zio_mode == QLA_ZIO_MODE_6) {
 		ql_log(ql_log_info, vha, 0x3002,
-		    "reporting new aen count of %d to the fw\n",
-		    vha->nvme_last_rptd_aen);
+			"nvme: Sched: Set ZIO exchange threshold to %d.\n",
+			ha->nvme_last_rptd_aen);
+		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+		start_dpc++;
 	}
 
 	/* Schedule the DPC routine if needed */
@@ -6181,6 +6282,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	ql_dbg(ql_dbg_aer, vha, 0x9000,
 	    "PCI error detected, state %x.\n", state);
 
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, vha, 0xffff,
+			"PCI device is disabled, state %x\n", state);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+
 	switch (state) {
 	case pci_channel_io_normal:
 		ha->flags.eeh_busy = 0;
@@ -6574,6 +6681,8 @@ qla2x00_module_init(void)
 	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
 	if (ql2xextended_error_logging)
 		strcat(qla2x00_version_str, "-debug");
+	if (ql2xextended_error_logging == 1)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 
 	qla2xxx_transport_template =
 	    fc_attach_transport(&qla2xxx_transport_functions);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e101cd3043b94539a2893e77aa9949c8fd9c7acb..f05cfc83c9c8d0110c253d0e2331a7e970f72d26 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -145,7 +145,7 @@ static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
  * Global Variables
  */
 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
-static struct kmem_cache *qla_tgt_plogi_cachep;
+struct kmem_cache *qla_tgt_plogi_cachep;
 static mempool_t *qla_tgt_mgmt_cmd_mempool;
 static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
@@ -585,11 +585,13 @@ void qla2x00_async_nack_sp_done(void *s, int res)
 		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
 		sp->fcport->logout_on_delete = 1;
 		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
+		sp->fcport->send_els_logo = 0;
 		break;
 
 	case SRB_NACK_PRLI:
 		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
 		sp->fcport->deleted = 0;
+		sp->fcport->send_els_logo = 0;
 
 		if (!sp->fcport->login_succ &&
 		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
@@ -1479,7 +1481,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
 	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
 
-	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 
 	/* Big hammer */
 	if (!ha->flags.host_shutting_down &&
@@ -1487,7 +1489,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 		qlt_disable_vha(vha);
 
 	/* Wait for sessions to clear out (just in case) */
-	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 	return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1528,6 +1530,7 @@ static void qlt_release(struct qla_tgt *tgt)
 	u64 key = 0;
 	u16 i;
 	struct qla_qpair_hint *h;
+	struct qla_hw_data *ha = vha->hw;
 
 	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
 	    !tgt->tgt_stopped)
@@ -1548,12 +1551,18 @@ static void qlt_release(struct qla_tgt *tgt)
 		}
 	}
 	kfree(tgt->qphints);
+	mutex_lock(&qla_tgt_mutex);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
+	mutex_unlock(&qla_tgt_mutex);
 
 	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
 		btree_remove64(&tgt->lun_qpair_map, key);
 
 	btree_destroy64(&tgt->lun_qpair_map);
 
+	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
+		ha->tgt.tgt_ops->remove_target(vha);
+
 	vha->vha_tgt.qla_tgt = NULL;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
@@ -1901,6 +1910,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	mcmd->reset_count = ha->base_qpair->chip_reset;
 	mcmd->tmr_func = QLA_TGT_ABTS;
 	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
 
 	/*
 	 * LUN is looked up by target-core internally based on the passed
@@ -2003,7 +2013,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
 	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
 {
-	struct scsi_qla_host *ha = qpair->vha;
+	struct scsi_qla_host *ha = mcmd->vha;
 	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
 	struct ctio7_to_24xx *ctio;
 	uint16_t temp;
@@ -3464,6 +3474,9 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
+	if (cmd)
+		vha = cmd->vha;
+
 	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
 	if (pkt == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe050,
@@ -4379,6 +4392,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
 	mcmd->flags = flags;
 	mcmd->reset_count = ha->base_qpair->chip_reset;
 	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
 
 	switch (fn) {
 	case QLA_TGT_LUN_RESET:
@@ -6170,10 +6184,6 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 	/* free left over qfull cmds */
 	qlt_init_term_exchange(vha);
 
-	mutex_lock(&qla_tgt_mutex);
-	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
-	mutex_unlock(&qla_tgt_mutex);
-
 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
 	    vha->host_no, ha);
 	qlt_release(vha->vha_tgt.qla_tgt);
@@ -6530,7 +6540,6 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 
 	/* Adjust ring index */
 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
-	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
@@ -6796,7 +6805,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 	if (!QLA_TGT_MODE_ENABLED())
 		return;
 
-	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if  (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
 	} else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 7fe02d036bdf1c92bff8a02c31a6b8b0fd1357bc..aba58d3848a6e7c99c264261f2680f425d7e10e6 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -705,6 +705,7 @@ struct qla_tgt_func_tmpl {
 	int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
 	int (*chk_dif_tags)(uint32_t tag);
 	void (*add_target)(struct scsi_qla_host *);
+	void (*remove_target)(struct scsi_qla_host *);
 };
 
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -959,6 +960,7 @@ struct qla_tgt_mgmt_cmd {
 	uint8_t fc_tm_rsp;
 	struct fc_port *sess;
 	struct qla_qpair *qpair;
+	struct scsi_qla_host *vha;
 	struct se_cmd se_cmd;
 	struct work_struct free_work;
 	unsigned int flags;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index b18646d6057f476e70d59137a0450e5f90d4b3fc..733e8dcccf5c3009314c2b648bf7af1a4d2ef660 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -443,8 +443,12 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
 		qla27xx_skip_entry(ent, buf);
 	}
 
-	if (buf)
-		ent->t263.num_queues = count;
+	if (buf) {
+		if (count)
+			ent->t263.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
 
 	return false;
 }
@@ -692,11 +696,12 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
 		qla27xx_skip_entry(ent, buf);
 	}
 
-	if (buf)
-		ent->t274.num_queues = count;
-
-	if (!count)
-		qla27xx_skip_entry(ent, buf);
+	if (buf) {
+		if (count)
+			ent->t274.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
 
 	return false;
 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 005a378f7fabb19449857f995e4a09f25c496d2a..8c4b505c9f66fb38797e1a1e628de78fd486e6cc 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.00.00.00-k"
+#define QLA2XXX_VERSION      "10.00.00.01-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
 #define QLA_DRIVER_MINOR_VER	0
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 840823b99e51a37505f4fce35fad9877153f10cd..95431d605c24c970563d863c11502fdb9b69eb3c 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -188,7 +188,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.info			= qlogicfas408_info,
 	.queuecommand		= qlogicfas408_queuecommand,
 	.eh_abort_handler	= qlogicfas408_abort,
-	.eh_bus_reset_handler	= qlogicfas408_bus_reset,
+	.eh_host_reset_handler	= qlogicfas408_host_reset,
 	.bios_param		= qlogicfas408_biosparam,
 	.can_queue		= 1,
 	.this_id		= -1,
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index c3a9151ca823cb64651390e940794262d9b7d989..8b471a925b43d82525f89f72209402f61a9562a7 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -496,13 +496,13 @@ int qlogicfas408_abort(struct scsi_cmnd *cmd)
 	return SUCCESS;
 }
 
-/* 
+/*
  *	Reset SCSI bus
  *	FIXME: This function is invoked with cmd = NULL directly by
  *	the PCMCIA qlogic_stub code. This wants fixing
  */
 
-int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
+int qlogicfas408_host_reset(struct scsi_cmnd *cmd)
 {
 	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
 	unsigned long flags;
@@ -607,7 +607,7 @@ module_exit(qlogicfas408_exit);
 EXPORT_SYMBOL(qlogicfas408_info);
 EXPORT_SYMBOL(qlogicfas408_queuecommand);
 EXPORT_SYMBOL(qlogicfas408_abort);
-EXPORT_SYMBOL(qlogicfas408_bus_reset);
+EXPORT_SYMBOL(qlogicfas408_host_reset);
 EXPORT_SYMBOL(qlogicfas408_biosparam);
 EXPORT_SYMBOL(qlogicfas408_ihandl);
 EXPORT_SYMBOL(qlogicfas408_get_chip_type);
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index 2f6c0a166200fdea89aba2d8aee31281718a6be1..f6b1216af79f30464effab44a3c7c832cc23af4a 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -108,7 +108,7 @@ int qlogicfas408_biosparam(struct scsi_device * disk,
 			   struct block_device *dev,
 			   sector_t capacity, int ip[]);
 int qlogicfas408_abort(struct scsi_cmnd * cmd);
-int qlogicfas408_bus_reset(struct scsi_cmnd * cmd);
+extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd);
 const char *qlogicfas408_info(struct Scsi_Host *host);
 int qlogicfas408_get_chip_type(int qbase, int int_type);
 void qlogicfas408_setup(int qbase, int id, int int_type);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69bfc0a1aea30f99319ad50c6ecaec0649cf8862..cec9a14982e6a3720afaf18633a8f39314a0c1e0 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1283,7 +1283,7 @@ static struct scsi_host_template qpti_template = {
 	.queuecommand		= qlogicpti_queuecommand,
 	.slave_configure	= qlogicpti_slave_configure,
 	.eh_abort_handler	= qlogicpti_abort,
-	.eh_bus_reset_handler	= qlogicpti_reset,
+	.eh_host_reset_handler	= qlogicpti_reset,
 	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
 	.this_id		= 7,
 	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1bf274e3b2b614b5331f4a082668e660ceba74e7..a7e4fba724b7a9ffa38b110b95b75ef391e1335b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -411,6 +411,57 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
+/**
+ * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
+ * @sdev: The device to ask
+ * @page: Which Vital Product Data to return
+ *
+ * Returns %NULL upon failure.
+ */
+static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
+{
+	struct scsi_vpd *vpd_buf;
+	int vpd_len = SCSI_VPD_PG_LEN, result;
+
+retry_pg:
+	vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
+	if (!vpd_buf)
+		return NULL;
+
+	result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
+	if (result < 0) {
+		kfree(vpd_buf);
+		return NULL;
+	}
+	if (result > vpd_len) {
+		vpd_len = result;
+		kfree(vpd_buf);
+		goto retry_pg;
+	}
+
+	vpd_buf->len = result;
+
+	return vpd_buf;
+}
+
+static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
+				 struct scsi_vpd __rcu **sdev_vpd_buf)
+{
+	struct scsi_vpd *vpd_buf;
+
+	vpd_buf = scsi_get_vpd_buf(sdev, page);
+	if (!vpd_buf)
+		return;
+
+	mutex_lock(&sdev->inquiry_mutex);
+	rcu_swap_protected(*sdev_vpd_buf, vpd_buf,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	mutex_unlock(&sdev->inquiry_mutex);
+
+	if (vpd_buf)
+		kfree_rcu(vpd_buf, rcu);
+}
+
 /**
  * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
  * @sdev: The device to ask
@@ -422,95 +473,24 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
  */
 void scsi_attach_vpd(struct scsi_device *sdev)
 {
-	int result, i;
-	int vpd_len = SCSI_VPD_PG_LEN;
-	int pg80_supported = 0;
-	int pg83_supported = 0;
-	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
+	int i;
+	struct scsi_vpd *vpd_buf;
 
 	if (!scsi_device_supports_vpd(sdev))
 		return;
 
-retry_pg0:
-	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-	if (!vpd_buf)
-		return;
-
 	/* Ask for all the pages supported by this device */
-	result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
-	if (result < 0) {
-		kfree(vpd_buf);
+	vpd_buf = scsi_get_vpd_buf(sdev, 0);
+	if (!vpd_buf)
 		return;
-	}
-	if (result > vpd_len) {
-		vpd_len = result;
-		kfree(vpd_buf);
-		goto retry_pg0;
-	}
 
-	for (i = 4; i < result; i++) {
-		if (vpd_buf[i] == 0x80)
-			pg80_supported = 1;
-		if (vpd_buf[i] == 0x83)
-			pg83_supported = 1;
+	for (i = 4; i < vpd_buf->len; i++) {
+		if (vpd_buf->data[i] == 0x80)
+			scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
+		if (vpd_buf->data[i] == 0x83)
+			scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
 	}
 	kfree(vpd_buf);
-	vpd_len = SCSI_VPD_PG_LEN;
-
-	if (pg80_supported) {
-retry_pg80:
-		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-		if (!vpd_buf)
-			return;
-
-		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
-		if (result < 0) {
-			kfree(vpd_buf);
-			return;
-		}
-		if (result > vpd_len) {
-			vpd_len = result;
-			kfree(vpd_buf);
-			goto retry_pg80;
-		}
-		mutex_lock(&sdev->inquiry_mutex);
-		orig_vpd_buf = sdev->vpd_pg80;
-		sdev->vpd_pg80_len = result;
-		rcu_assign_pointer(sdev->vpd_pg80, vpd_buf);
-		mutex_unlock(&sdev->inquiry_mutex);
-		synchronize_rcu();
-		if (orig_vpd_buf) {
-			kfree(orig_vpd_buf);
-			orig_vpd_buf = NULL;
-		}
-		vpd_len = SCSI_VPD_PG_LEN;
-	}
-
-	if (pg83_supported) {
-retry_pg83:
-		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
-		if (!vpd_buf)
-			return;
-
-		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
-		if (result < 0) {
-			kfree(vpd_buf);
-			return;
-		}
-		if (result > vpd_len) {
-			vpd_len = result;
-			kfree(vpd_buf);
-			goto retry_pg83;
-		}
-		mutex_lock(&sdev->inquiry_mutex);
-		orig_vpd_buf = sdev->vpd_pg83;
-		sdev->vpd_pg83_len = result;
-		rcu_assign_pointer(sdev->vpd_pg83, vpd_buf);
-		mutex_unlock(&sdev->inquiry_mutex);
-		synchronize_rcu();
-		if (orig_vpd_buf)
-			kfree(orig_vpd_buf);
-	}
 }
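
The reworked VPD code above publishes each page as an RCU-protected struct scsi_vpd: scsi_update_vpd_page() swaps the pointer under sdev->inquiry_mutex with rcu_swap_protected() and frees the old buffer with kfree_rcu(). A minimal reader-side sketch of the same pattern (the helper name is hypothetical; the accessors mirror what scsi_vpd_lun_id() does later in this patch):

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/string.h>
	#include <scsi/scsi_device.h>

	/* Hypothetical consumer: copy out the device identification VPD page. */
	static int copy_vpd_pg83(struct scsi_device *sdev, unsigned char *buf,
				 size_t len)
	{
		const struct scsi_vpd *vpd;
		int ret = -ENXIO;

		rcu_read_lock();
		vpd = rcu_dereference(sdev->vpd_pg83);
		if (vpd) {
			ret = min_t(size_t, len, vpd->len);
			memcpy(buf, vpd->data, ret);	/* stable inside the read-side section */
		}
		rcu_read_unlock();
		return ret;
	}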
 
 /**
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3be980d472681a4410d681e99d649b99cddb3cea..77a0335eb757de1c3678e267020f55f893f4964c 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2261,7 +2261,7 @@ static int resp_ie_l_pg(unsigned char * arr)
 static int resp_log_sense(struct scsi_cmnd * scp,
                           struct sdebug_dev_info * devip)
 {
-	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
+	int ppc, sp, pcode, subpcode, alloc_len, len, n;
 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
 	unsigned char *cmd = scp->cmnd;
 
@@ -2272,7 +2272,6 @@ static int resp_log_sense(struct scsi_cmnd * scp,
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
 		return check_condition_result;
 	}
-	pcontrol = (cmd[2] & 0xc0) >> 6;
 	pcode = cmd[2] & 0x3f;
 	subpcode = cmd[3] & 0xff;
 	alloc_len = get_unaligned_be16(cmd + 7);
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index a97c9507103d2180b776faaa4885796f9b466876..5e9755008aed40f054a394d06031dd351ec2dece 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -6,8 +6,10 @@
 void scsi_show_rq(struct seq_file *m, struct request *rq)
 {
 	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+	int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
 	char buf[80];
 
 	__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
-	seq_printf(m, ", .cmd=%s", buf);
+	seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
+		   cmd->retries, msecs / 1000, msecs % 1000);
 }
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ea9f40e51f684ea530dae52b7a17d0a1f45b695c..38942050b2656bd104abb465a79b29c4e7039d43 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -259,7 +259,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
  */
 enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
-	struct scsi_cmnd *scmd = req->special;
+	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
 	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 	struct Scsi_Host *host = scmd->device->host;
 
@@ -552,6 +552,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
 			set_host_byte(scmd, DID_ALLOC_FAILURE);
 			return SUCCESS;
 		}
+		/* FALLTHROUGH */
 	case COPY_ABORTED:
 	case VOLUME_OVERFLOW:
 	case MISCOMPARE:
@@ -573,6 +574,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
 			return ADD_TO_MLQUEUE;
 		else
 			set_host_byte(scmd, DID_TARGET_FAILURE);
+		/* FALLTHROUGH */
 
 	case ILLEGAL_REQUEST:
 		if (sshdr.asc == 0x20 || /* Invalid command operation code */
@@ -683,6 +685,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 	switch (status_byte(scmd->result)) {
 	case GOOD:
 		scsi_handle_queue_ramp_up(scmd->device);
+		/* FALLTHROUGH */
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case CHECK_CONDITION:
@@ -1734,6 +1737,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 			set_host_byte(scmd, DID_TIME_OUT);
 			return SUCCESS;
 		}
+		/* FALLTHROUGH */
 	case DID_NO_CONNECT:
 	case DID_BAD_TARGET:
 		/*
@@ -1819,6 +1823,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		 * the case of trying to send too many commands to a
 		 * tagged queueing device.
 		 */
+		/* FALLTHROUGH */
 	case BUSY:
 		/*
 		 * device can't talk to us at the moment.  Should only
@@ -1831,6 +1836,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 		if (scmd->cmnd[0] == REPORT_LUNS)
 			scmd->device->sdev_target->expecting_lun_change = 0;
 		scsi_handle_queue_ramp_up(scmd->device);
+		/* FALLTHROUGH */
 	case COMMAND_TERMINATED:
 		return SUCCESS;
 	case TASK_ABORTED:
@@ -2320,8 +2326,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
 		rtn = scsi_try_host_reset(scmd);
 		if (rtn == SUCCESS)
 			break;
-	default:
 		/* FALLTHROUGH */
+	default:
 		rtn = FAILED;
 		break;
 	}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b6bf3f29a12a4e807f86e1a1025e9907f23ddd1e..0a875491f5a7621d59268217890c973ab6047083 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -116,13 +116,15 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
 		case NOT_READY:	/* This happens if there is no disc in drive */
 			if (sdev->removable)
 				break;
+			/* FALLTHROUGH */
 		case UNIT_ATTENTION:
 			if (sdev->removable) {
 				sdev->changed = 1;
 				result = 0;	/* This is no longer considered an error */
 				break;
 			}
-		default:	/* Fall through for non-removable media */
+			/* FALLTHROUGH -- for non-removable media */
+		default:
 			sdev_printk(KERN_INFO, sdev,
 				    "ioctl_internal_command return code = %x\n",
 				    result);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f6097b89d5d381229cc0be0a9753c908de5caa47..9cf6a80fe29754fc93d96d41edb090db2fdd67f4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -44,6 +44,8 @@ static struct kmem_cache *scsi_sense_cache;
 static struct kmem_cache *scsi_sense_isadma_cache;
 static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
+static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
+
 static inline struct kmem_cache *
 scsi_select_sense_cache(bool unchecked_isa_dma)
 {
@@ -140,6 +142,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 
+	if (cmd->request->rq_flags & RQF_DONTPREP) {
+		cmd->request->rq_flags &= ~RQF_DONTPREP;
+		scsi_mq_uninit_cmd(cmd);
+	} else {
+		WARN_ON_ONCE(true);
+	}
 	blk_mq_requeue_request(cmd->request, true);
 	put_device(&sdev->sdev_gendev);
 }
@@ -627,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
 static bool scsi_end_request(struct request *req, blk_status_t error,
 		unsigned int bytes, unsigned int bidi_bytes)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev = cmd->device;
 	struct request_queue *q = sdev->request_queue;
 
@@ -642,6 +650,11 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 	if (blk_queue_add_random(q))
 		add_disk_randomness(req->rq_disk);
 
+	if (!blk_rq_is_scsi(req)) {
+		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
+		cmd->flags &= ~SCMD_INITIALIZED;
+	}
+
 	if (req->mq_ctx) {
 		/*
 		 * In the MQ case the command gets freed by __blk_mq_end_request,
@@ -977,8 +990,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		 * A new command will be prepared and issued.
 		 */
 		if (q->mq_ops) {
-			cmd->request->rq_flags &= ~RQF_DONTPREP;
-			scsi_mq_uninit_cmd(cmd);
 			scsi_mq_requeue_cmd(cmd);
 		} else {
 			scsi_release_buffers(cmd);
@@ -1107,15 +1118,23 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 EXPORT_SYMBOL(scsi_init_io);
 
 /**
- * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ * scsi_initialize_rq - partially initialize struct scsi_cmnd
+ * @rq: Request associated with the SCSI command to be initialized.
+ *
+ * This function initializes the members of struct scsi_cmnd that must be
+ * initialized before request processing starts and that won't be
+ * reinitialized if a SCSI command is requeued.
  *
- * Called from inside blk_get_request().
+ * Called from inside blk_get_request() for pass-through requests and from
+ * inside scsi_init_command() for filesystem requests.
  */
 void scsi_initialize_rq(struct request *rq)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	scsi_req_init(&cmd->req);
+	cmd->jiffies_at_alloc = jiffies;
+	cmd->retries = 0;
 }
 EXPORT_SYMBOL(scsi_initialize_rq);
 
@@ -1153,8 +1172,18 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
 	void *buf = cmd->sense_buffer;
 	void *prot = cmd->prot_sdb;
-	unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
+	unsigned long jiffies_at_alloc;
+	int retries;
+
+	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
+		flags |= SCMD_INITIALIZED;
+		scsi_initialize_rq(rq);
+	}
 
+	jiffies_at_alloc = cmd->jiffies_at_alloc;
+	retries = cmd->retries;
 	/* zero out the cmd, except for the embedded scsi_request */
 	memset((char *)cmd + sizeof(cmd->req), 0,
 		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1162,16 +1191,17 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 	cmd->device = dev;
 	cmd->sense_buffer = buf;
 	cmd->prot_sdb = prot;
-	cmd->flags = unchecked_isa_dma;
+	cmd->flags = flags;
 	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
-	cmd->jiffies_at_alloc = jiffies;
+	cmd->jiffies_at_alloc = jiffies_at_alloc;
+	cmd->retries = retries;
 
 	scsi_add_cmd_to_list(cmd);
 }
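
scsi_init_command() now saves jiffies_at_alloc and retries before the memset and restores them afterwards, so a requeued command keeps its allocation time and retry count. The save/zero/restore idiom in isolation, with a purely hypothetical structure for illustration:

	#include <linux/string.h>

	/* Hypothetical command structure; only the idiom matches the driver code. */
	struct demo_cmd {
		unsigned long jiffies_at_alloc;	/* set once, when the request is allocated */
		int retries;			/* must survive a requeue */
		char payload[64];		/* reinitialized on every prep */
	};

	static void demo_reinit(struct demo_cmd *cmd)
	{
		unsigned long jiffies_at_alloc = cmd->jiffies_at_alloc;
		int retries = cmd->retries;

		memset(cmd, 0, sizeof(*cmd));			/* wipe everything ... */
		cmd->jiffies_at_alloc = jiffies_at_alloc;	/* ... then restore the preserved fields */
		cmd->retries = retries;
	}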
 
 static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	/*
 	 * Passthrough requests may transfer data, in which case they must
@@ -1202,7 +1232,7 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
  */
 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
 		int ret = sdev->handler->prep_fn(sdev, req);
@@ -1217,7 +1247,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 
 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	if (!blk_rq_bytes(req))
 		cmd->sc_data_direction = DMA_NONE;
@@ -1349,12 +1379,14 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 
 	ret = scsi_setup_cmnd(sdev, req);
 out:
+	if (ret != BLKPREP_OK)
+		cmd->flags &= ~SCMD_INITIALIZED;
 	return scsi_prep_return(q, req, ret);
 }
 
 static void scsi_unprep_fn(struct request_queue *q, struct request *req)
 {
-	scsi_uninit_cmd(req->special);
+	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
 }
 
 /*
@@ -1545,7 +1577,7 @@ static int scsi_lld_busy(struct request_queue *q)
  */
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
-	struct scsi_cmnd *cmd = req->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev;
 	struct scsi_target *starget;
 	struct Scsi_Host *shost;
@@ -1576,7 +1608,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->special;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
@@ -1764,8 +1796,8 @@ static void scsi_request_fn(struct request_queue *q)
 			blk_start_request(req);
 
 		spin_unlock_irq(q->queue_lock);
-		cmd = req->special;
-		if (unlikely(cmd == NULL)) {
+		cmd = blk_mq_rq_to_pdu(req);
+		if (cmd != req->special) {
 			printk(KERN_CRIT "impossible request in %s.\n"
 					 "please mail a stack trace to "
 					 "linux-scsi@vger.kernel.org\n",
@@ -1868,6 +1900,7 @@ static int scsi_mq_prep_fn(struct request *req)
 	struct scsi_device *sdev = req->q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	struct scatterlist *sg;
+	int ret;
 
 	scsi_init_command(sdev, cmd);
 
@@ -1901,7 +1934,10 @@ static int scsi_mq_prep_fn(struct request *req)
 
 	blk_mq_start_request(req);
 
-	return scsi_setup_cmnd(sdev, req);
+	ret = scsi_setup_cmnd(sdev, req);
+	if (ret != BLK_STS_OK)
+		cmd->flags &= ~SCMD_INITIALIZED;
+	return ret;
 }
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
@@ -2001,8 +2037,8 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
 	return scsi_times_out(req);
 }
 
-static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
-		unsigned int hctx_idx, unsigned int numa_node)
+static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+				unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct Scsi_Host *shost = set->driver_data;
 	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2026,8 +2062,8 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return 0;
 }
 
-static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
-		unsigned int hctx_idx)
+static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+				 unsigned int hctx_idx)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
@@ -2104,7 +2140,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
-static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
+			    gfp_t gfp)
 {
 	struct Scsi_Host *shost = q->rq_alloc_data;
 	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2134,7 +2171,7 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
 	return -ENOMEM;
 }
 
-static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
@@ -2144,7 +2181,7 @@ static void scsi_exit_rq(struct request_queue *q, struct request *rq)
 			       cmd->sense_buffer);
 }
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
@@ -2155,8 +2192,8 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
 	q->rq_alloc_data = shost;
 	q->request_fn = scsi_request_fn;
-	q->init_rq_fn = scsi_init_rq;
-	q->exit_rq_fn = scsi_exit_rq;
+	q->init_rq_fn = scsi_old_init_rq;
+	q->exit_rq_fn = scsi_old_exit_rq;
 	q->initialize_rq_fn = scsi_initialize_rq;
 
 	if (blk_init_allocated_queue(q) < 0) {
@@ -2180,8 +2217,8 @@ static const struct blk_mq_ops scsi_mq_ops = {
 #ifdef CONFIG_BLK_DEBUG_FS
 	.show_rq	= scsi_show_rq,
 #endif
-	.init_request	= scsi_init_request,
-	.exit_request	= scsi_exit_request,
+	.init_request	= scsi_mq_init_request,
+	.exit_request	= scsi_mq_exit_request,
 	.initialize_rq_fn = scsi_initialize_rq,
 	.map_queues	= scsi_map_queues,
 };
@@ -2547,7 +2584,7 @@ EXPORT_SYMBOL(scsi_test_unit_ready);
  *	@sdev:	scsi device to change the state of.
  *	@state:	state to change to.
  *
- *	Returns zero if unsuccessful or an error if the requested 
+ *	Returns zero if successful or an error if the requested
  *	transition is illegal.
  */
 int
@@ -2654,6 +2691,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	}
 	sdev->sdev_state = state;
+	sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
 	return 0;
 
  illegal:
@@ -3073,19 +3111,26 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
 	 */
-	if ((sdev->sdev_state == SDEV_BLOCK) ||
-	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
+	switch (sdev->sdev_state) {
+	case SDEV_BLOCK:
+	case SDEV_TRANSPORT_OFFLINE:
 		sdev->sdev_state = new_state;
-	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
+		break;
+	case SDEV_CREATED_BLOCK:
 		if (new_state == SDEV_TRANSPORT_OFFLINE ||
 		    new_state == SDEV_OFFLINE)
 			sdev->sdev_state = new_state;
 		else
 			sdev->sdev_state = SDEV_CREATED;
-	} else if (sdev->sdev_state != SDEV_CANCEL &&
-		 sdev->sdev_state != SDEV_OFFLINE)
+		sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
+		break;
+	case SDEV_CANCEL:
+	case SDEV_OFFLINE:
+		break;
+	default:
 		return -EINVAL;
-
+	}
 	scsi_start_queue(sdev);
 
 	return 0;
@@ -3262,8 +3307,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 {
 	u8 cur_id_type = 0xff;
 	u8 cur_id_size = 0;
-	unsigned char *d, *cur_id_str;
-	unsigned char __rcu *vpd_pg83;
+	const unsigned char *d, *cur_id_str;
+	const struct scsi_vpd *vpd_pg83;
 	int id_size = -EINVAL;
 
 	rcu_read_lock();
@@ -3294,8 +3339,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
 	}
 
 	memset(id, 0, id_len);
-	d = vpd_pg83 + 4;
-	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
+	d = vpd_pg83->data + 4;
+	while (d < vpd_pg83->data + vpd_pg83->len) {
 		/* Skip designators not referring to the LUN */
 		if ((d[1] & 0x30) != 0x00)
 			goto next_desig;
@@ -3411,8 +3456,8 @@ EXPORT_SYMBOL(scsi_vpd_lun_id);
  */
 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
 {
-	unsigned char *d;
-	unsigned char __rcu *vpd_pg83;
+	const unsigned char *d;
+	const struct scsi_vpd *vpd_pg83;
 	int group_id = -EAGAIN, rel_port = -1;
 
 	rcu_read_lock();
@@ -3422,8 +3467,8 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
 		return -ENXIO;
 	}
 
-	d = sdev->vpd_pg83 + 4;
-	while (d < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
+	d = vpd_pg83->data + 4;
+	while (d < vpd_pg83->data + vpd_pg83->len) {
 		switch (d[1] & 0xf) {
 		case 0x4:
 			/* Relative target port */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index c11c1f9c912c3e645679c4b53ca6456b89a91fb8..5c6d016a5ae9b8f3dab21cb49789c7ef2255c047 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -88,7 +88,7 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fd88dabd599d4d2f24d3bebf78c2b9ea9baa2e89..e7818afeda2bea560101fe023983050d0b3086b2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -268,7 +268,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	if (shost_use_blk_mq(shost))
 		sdev->request_queue = scsi_mq_alloc_queue(sdev);
 	else
-		sdev->request_queue = scsi_alloc_queue(sdev);
+		sdev->request_queue = scsi_old_alloc_queue(sdev);
 	if (!sdev->request_queue) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
 		 * have to free and put manually here */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d6984df71f1c6da6df6ae4f5f153c909c33bc397..bf53356f41f0639e7c0e70393d91337a2bed2880 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -114,7 +114,7 @@ static int check_set(unsigned long long *val, char *src)
 {
 	char *last;
 
-	if (strncmp(src, "-", 20) == 0) {
+	if (strcmp(src, "-") == 0) {
 		*val = SCAN_WILD_CARD;
 	} else {
 		/*
@@ -303,6 +303,8 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
 
 	if (sht->host_reset)
 		ret = sht->host_reset(shost, type);
+	else
+		ret = -EOPNOTSUPP;
 
 exit_store_host_reset:
 	if (ret == 0)
@@ -426,6 +428,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct list_head *this, *tmp;
+	struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
 	unsigned long flags;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
@@ -454,8 +457,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
 
-	kfree(sdev->vpd_pg83);
-	kfree(sdev->vpd_pg80);
+	mutex_lock(&sdev->inquiry_mutex);
+	rcu_swap_protected(sdev->vpd_pg80, vpd_pg80,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	rcu_swap_protected(sdev->vpd_pg83, vpd_pg83,
+			   lockdep_is_held(&sdev->inquiry_mutex));
+	mutex_unlock(&sdev->inquiry_mutex);
+
+	if (vpd_pg83)
+		kfree_rcu(vpd_pg83, rcu);
+	if (vpd_pg80)
+		kfree_rcu(vpd_pg80, rcu);
 	kfree(sdev->inquiry);
 	kfree(sdev);
 
@@ -793,15 +805,16 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
 {									\
 	struct device *dev = container_of(kobj, struct device, kobj);	\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	int ret;							\
-	if (!sdev->vpd_##_page)						\
-		return -EINVAL;						\
+	struct scsi_vpd *vpd_page;					\
+	int ret = -EINVAL;						\
+									\
 	rcu_read_lock();						\
-	ret = memory_read_from_buffer(buf, count, &off,			\
-				      rcu_dereference(sdev->vpd_##_page), \
-				       sdev->vpd_##_page##_len);	\
+	vpd_page = rcu_dereference(sdev->vpd_##_page);			\
+	if (vpd_page)							\
+		ret = memory_read_from_buffer(buf, count, &off,		\
+				vpd_page->data, vpd_page->len);		\
 	rcu_read_unlock();						\
-	return ret;						\
+	return ret;							\
 }									\
 static struct bin_attribute dev_attr_vpd_##_page = {		\
 	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 892fbd9800d986a8dba22efac7ac4803be2c5f17..3c6bc0081fcbe34afcfc5c398a5637a2d31ae3ab 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3276,8 +3276,8 @@ fc_scsi_scan_rport(struct work_struct *work)
 }
 
 /**
- * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
- * @cmnd: SCSI command that scsi_eh is trying to recover
+ * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
+ * @rport: Remote port that scsi_eh is trying to recover.
  *
  * This routine can be called from a FC LLD scsi_eh callback. It
  * blocks the scsi_eh thread until the fc_rport leaves the
@@ -3289,10 +3289,9 @@ fc_scsi_scan_rport(struct work_struct *work)
  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
  *	    passed back to scsi_eh.
  */
-int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_rport(struct fc_rport *rport)
 {
-	struct Scsi_Host *shost = cmnd->device->host;
-	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
@@ -3309,6 +3308,28 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
 
 	return 0;
 }
+EXPORT_SYMBOL(fc_block_rport);
+
+/**
+ * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
+ * @cmnd: SCSI command that scsi_eh is trying to recover
+ *
+ * This routine can be called from a FC LLD scsi_eh callback. It
+ * blocks the scsi_eh thread until the fc_rport leaves the
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
+ *	    passed back to scsi_eh.
+ */
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+	return fc_block_rport(rport);
+}
 EXPORT_SYMBOL(fc_block_scsi_eh);
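
fc_block_rport() is now exported for LLDs that have an rport but no scsi_cmnd at hand, and fc_block_scsi_eh() becomes a thin wrapper around it. A hedged sketch of an error-handler callback using it; the handler and the reset it would issue are hypothetical, only the transport calls come from this patch:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>
	#include <scsi/scsi_transport_fc.h>

	/* Hypothetical LLD device-reset handler. */
	static int demo_eh_device_reset(struct scsi_cmnd *cmnd)
	{
		struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
		int ret;

		ret = fc_block_rport(rport);	/* wait for FC_PORTSTATE_BLOCKED to clear */
		if (ret == FAST_IO_FAIL)
			return ret;		/* hand FAST_IO_FAIL back to scsi_eh */

		/* ... issue the LUN reset to the target here ... */
		return SUCCESS;
	}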
 
 /**
@@ -3763,7 +3784,8 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
 	snprintf(bsg_name, sizeof(bsg_name),
 		 "fc_host%d", shost->host_no);
 
-	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size,
+			NULL);
 	if (IS_ERR(q)) {
 		dev_err(dev,
 			"fc_host%d: bsg interface failed to initialize - setup queue\n",
@@ -3808,7 +3830,8 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
 	if (!i->f->bsg_request)
 		return -ENOTSUPP;
 
-	q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size,
+			NULL);
 	if (IS_ERR(q)) {
 		dev_err(dev, "failed to setup bsg queue\n");
 		return PTR_ERR(q);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index a424eaeafeb09852c898dc1e8a2326bce7109f29..8934f19bce8ea815d696189e7bb075c43cffaa5c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev)
 	kfree(fnode_sess);
 }
 
-static struct device_type iscsi_flashnode_sess_dev_type = {
+static const struct device_type iscsi_flashnode_sess_dev_type = {
 	.name = "iscsi_flashnode_sess_dev_type",
 	.groups = iscsi_flashnode_sess_attr_groups,
 	.release = iscsi_flashnode_sess_release,
@@ -1195,7 +1195,7 @@ static void iscsi_flashnode_conn_release(struct device *dev)
 	kfree(fnode_conn);
 }
 
-static struct device_type iscsi_flashnode_conn_dev_type = {
+static const struct device_type iscsi_flashnode_conn_dev_type = {
 	.name = "iscsi_flashnode_conn_dev_type",
 	.groups = iscsi_flashnode_conn_attr_groups,
 	.release = iscsi_flashnode_conn_release,
@@ -1542,7 +1542,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
 		return -ENOTSUPP;
 
 	snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0, NULL);
 	if (IS_ERR(q)) {
 		shost_printk(KERN_ERR, shost, "bsg interface failed to "
 			     "initialize - no request queue\n");
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 5006a656e16a775b055d0c94b8809577ee34fcb3..319dff970237510131e5cce802151b4d731c68b0 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -169,39 +169,22 @@ static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
 	return rdev;
 }
 
-static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
-			    struct sas_rphy *rphy)
+static int sas_smp_dispatch(struct bsg_job *job)
 {
-	struct request *req;
-	blk_status_t ret;
-	int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
+	struct Scsi_Host *shost = dev_to_shost(job->dev);
+	struct sas_rphy *rphy = NULL;
 
-	while ((req = blk_fetch_request(q)) != NULL) {
-		spin_unlock_irq(q->queue_lock);
+	if (!scsi_is_host_device(job->dev))
+		rphy = dev_to_rphy(job->dev);
 
-		scsi_req(req)->resid_len = blk_rq_bytes(req);
-		if (req->next_rq)
-			scsi_req(req->next_rq)->resid_len =
-				blk_rq_bytes(req->next_rq);
-		handler = to_sas_internal(shost->transportt)->f->smp_handler;
-		ret = handler(shost, rphy, req);
-		scsi_req(req)->result = ret;
-
-		blk_end_request_all(req, 0);
-
-		spin_lock_irq(q->queue_lock);
+	if (!job->req->next_rq) {
+		dev_warn(job->dev, "space for an SMP response is missing\n");
+		bsg_job_done(job, -EINVAL, 0);
+		return 0;
 	}
-}
-
-static void sas_host_smp_request(struct request_queue *q)
-{
-	sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
-}
 
-static void sas_non_host_smp_request(struct request_queue *q)
-{
-	struct sas_rphy *rphy = q->queuedata;
-	sas_smp_request(q, rphy_to_shost(rphy), rphy);
+	to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy);
+	return 0;
 }
 
 static void sas_host_release(struct device *dev)
@@ -217,81 +200,36 @@ static void sas_host_release(struct device *dev)
 static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 {
 	struct request_queue *q;
-	int error;
-	struct device *dev;
-	char namebuf[20];
-	const char *name;
-	void (*release)(struct device *);
 
 	if (!to_sas_internal(shost->transportt)->f->smp_handler) {
 		printk("%s can't handle SMP requests\n", shost->hostt->name);
 		return 0;
 	}
 
-	q = blk_alloc_queue(GFP_KERNEL);
-	if (!q)
-		return -ENOMEM;
-	q->initialize_rq_fn = scsi_initialize_rq;
-	q->cmd_size = sizeof(struct scsi_request);
-
 	if (rphy) {
-		q->request_fn = sas_non_host_smp_request;
-		dev = &rphy->dev;
-		name = dev_name(dev);
-		release = NULL;
+		q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
+				sas_smp_dispatch, 0, NULL);
+		if (IS_ERR(q))
+			return PTR_ERR(q);
+		rphy->q = q;
 	} else {
-		q->request_fn = sas_host_smp_request;
-		dev = &shost->shost_gendev;
-		snprintf(namebuf, sizeof(namebuf),
-			 "sas_host%d", shost->host_no);
-		name = namebuf;
-		release = sas_host_release;
+		char name[20];
+
+		snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
+		q = bsg_setup_queue(&shost->shost_gendev, name,
+				sas_smp_dispatch, 0, sas_host_release);
+		if (IS_ERR(q))
+			return PTR_ERR(q);
+		to_sas_host_attrs(shost)->q = q;
 	}
-	error = blk_init_allocated_queue(q);
-	if (error)
-		goto out_cleanup_queue;
 
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-	error = bsg_register_queue(q, dev, name, release);
-	if (error)
-		goto out_cleanup_queue;
-
-	if (rphy)
-		rphy->q = q;
-	else
-		to_sas_host_attrs(shost)->q = q;
-
-	if (rphy)
-		q->queuedata = rphy;
-	else
-		q->queuedata = shost;
-
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
-
-out_cleanup_queue:
-	blk_cleanup_queue(q);
-	return error;
-}
-
-static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
-{
-	struct request_queue *q;
-
-	if (rphy)
-		q = rphy->q;
-	else
-		q = to_sas_host_attrs(shost)->q;
-
-	if (!q)
-		return;
-
-	bsg_unregister_queue(q);
 }
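
sas_smp_dispatch() and the bsg_setup_queue() calls above replace the old request_fn based SMP queues; the handler now receives a struct bsg_job and completes it with bsg_job_done(). A minimal sketch of that registration/dispatch shape under the five-argument bsg_setup_queue() signature this patch uses; everything except the bsg calls is hypothetical:

	#include <linux/blkdev.h>
	#include <linux/bsg-lib.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical dispatcher: a real one inspects job->request_payload and
	 * job->reply_payload before completing the job. */
	static int demo_bsg_dispatch(struct bsg_job *job)
	{
		bsg_job_done(job, 0, 0);
		return 0;
	}

	static int demo_bsg_register(struct device *dev)
	{
		struct request_queue *q;

		/* dd_job_size 0 and no release callback, as in the rphy case above. */
		q = bsg_setup_queue(dev, dev_name(dev), demo_bsg_dispatch, 0, NULL);
		return PTR_ERR_OR_ZERO(q);
	}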
 
 /*
@@ -321,9 +259,10 @@ static int sas_host_remove(struct transport_container *tc, struct device *dev,
 			   struct device *cdev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
+	struct request_queue *q = to_sas_host_attrs(shost)->q;
 
-	sas_bsg_remove(shost, NULL);
-
+	if (q)
+		bsg_unregister_queue(q);
 	return 0;
 }
 
@@ -421,6 +360,9 @@ sas_tlr_supported(struct scsi_device *sdev)
 	char *buffer = kzalloc(vpd_len, GFP_KERNEL);
 	int ret = 0;
 
+	if (!buffer)
+		goto out;
+
 	if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
 		goto out;
 
@@ -1710,7 +1652,8 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}
 
 	sas_rphy_unlink(rphy);
-	sas_bsg_remove(NULL, rphy);
+	if (rphy->q)
+		bsg_unregister_queue(rphy->q);
 	transport_remove_device(dev);
 	device_del(dev);
 }
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index f617021c94f740e6dd8fe8ac2526405a4319c6d2..4f6f01cf9968e70c81e4dbd6d5603a3ae4fa4f1c 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -78,7 +78,7 @@ static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
  * parameters must be such that multipath can detect failed paths timely.
  * Hence do not allow all three parameters to be disabled simultaneously.
  */
-int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
+int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
 {
 	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
 		return -EINVAL;
@@ -556,8 +556,11 @@ int srp_reconnect_rport(struct srp_rport *rport)
 		 */
 		shost_for_each_device(sdev, shost) {
 			mutex_lock(&sdev->state_mutex);
-			if (sdev->sdev_state == SDEV_OFFLINE)
+			if (sdev->sdev_state == SDEV_OFFLINE) {
 				sdev->sdev_state = SDEV_RUNNING;
+				sysfs_notify(&sdev->sdev_gendev.kobj,
+					     NULL, "state");
+			}
 			mutex_unlock(&sdev->state_mutex);
 		}
 	} else if (rport->state == SRP_RPORT_RUNNING) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e2647f2d44304b0ded65ebb0f55d629c7289d0b7..11c1738c21000862718ec4f60a663409a11c0b6a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -312,7 +312,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr,
 	if (err)
 		return err;
 
-	if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION)
+	if (val <= T10_PI_TYPE3_PROTECTION)
 		sdkp->protection_type = val;
 
 	return count;
@@ -1013,7 +1013,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
-	SCpnt = rq->special;
+	WARN_ON_ONCE(SCpnt != rq->special);
 
 	/* from here on until we're complete, any goto out
 	 * is used for a killable error condition */
@@ -3226,7 +3226,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
 	gd->major = sd_major((index & 0xf0) >> 4);
 	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-	gd->minors = SD_MINORS;
 
 	gd->fops = &sd_fops;
 	gd->private_data = &sdkp->driver;
@@ -3540,7 +3539,7 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 			 * doesn't support sync. There's not much to do and
 			 * suspend shouldn't fail.
 			 */
-			 ret = 0;
+			ret = 0;
 		}
 	}
 
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8927f9f54ad926f5b50439e23685a35b24c50edf..11826c5c2dd40d3d16e324b55ee2d0b3a896ef3f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -51,6 +51,13 @@ struct ses_component {
 	u64 addr;
 };
 
+static bool ses_page2_supported(struct enclosure_device *edev)
+{
+	struct ses_device *ses_dev = edev->scratch;
+
+	return (ses_dev->page2 != NULL);
+}
+
 static int ses_probe(struct device *dev)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -179,7 +186,8 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
 	unsigned char *type_ptr = ses_dev->page1_types;
 	unsigned char *desc_ptr = ses_dev->page2 + 8;
 
-	ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+	if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0)
+		return NULL;
 
 	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
@@ -203,6 +211,10 @@ static void ses_get_fault(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->fault = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->fault = (desc[3] & 0x60) >> 4;
@@ -215,6 +227,9 @@ static int ses_set_fault(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -242,6 +257,10 @@ static void ses_get_status(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->status = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->status = (desc[0] & 0x0f);
@@ -252,6 +271,10 @@ static void ses_get_locate(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->locate = 0;
+		return;
+	}
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
@@ -264,6 +287,9 @@ static int ses_set_locate(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -292,6 +318,9 @@ static int ses_set_active(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -328,6 +357,11 @@ static void ses_get_power_status(struct enclosure_device *edev,
 {
 	unsigned char *desc;
 
+	if (!ses_page2_supported(edev)) {
+		ecomp->power_status = 0;
+		return;
+	}
+
 	desc = ses_get_page2_descriptor(edev, ecomp);
 	if (desc)
 		ecomp->power_status = (desc[3] & 0x10) ? 0 : 1;
@@ -340,6 +374,9 @@ static int ses_set_power_status(struct enclosure_device *edev,
 	unsigned char desc[4];
 	unsigned char *desc_ptr;
 
+	if (!ses_page2_supported(edev))
+		return -EINVAL;
+
 	desc_ptr = ses_get_page2_descriptor(edev, ecomp);
 
 	if (!desc_ptr)
@@ -601,7 +638,7 @@ static int ses_intf_add(struct device *cdev,
 {
 	struct scsi_device *sdev = to_scsi_device(cdev->parent);
 	struct scsi_device *tmp_sdev;
-	unsigned char *buf = NULL, *hdr_buf, *type_ptr;
+	unsigned char *buf = NULL, *hdr_buf, *type_ptr, page;
 	struct ses_device *ses_dev;
 	u32 result;
 	int i, types, len, components = 0;
@@ -630,7 +667,8 @@ static int ses_intf_add(struct device *cdev,
 	if (!hdr_buf || !ses_dev)
 		goto err_init_free;
 
-	result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
+	page = 1;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
 		goto recv_failed;
 
@@ -639,7 +677,7 @@ static int ses_intf_add(struct device *cdev,
 	if (!buf)
 		goto err_free;
 
-	result = ses_recv_diag(sdev, 1, buf, len);
+	result = ses_recv_diag(sdev, page, buf, len);
 	if (result)
 		goto recv_failed;
 
@@ -669,9 +707,10 @@ static int ses_intf_add(struct device *cdev,
 	ses_dev->page1_len = len;
 	buf = NULL;
 
-	result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
+	page = 2;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
-		goto recv_failed;
+		goto page2_not_supported;
 
 	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
 	buf = kzalloc(len, GFP_KERNEL);
@@ -688,7 +727,8 @@ static int ses_intf_add(struct device *cdev,
 
 	/* The additional information page --- allows us
 	 * to match up the devices */
-	result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
+	page = 10;
+	result = ses_recv_diag(sdev, page, hdr_buf, INIT_ALLOC_SIZE);
 	if (!result) {
 
 		len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
@@ -696,13 +736,14 @@ static int ses_intf_add(struct device *cdev,
 		if (!buf)
 			goto err_free;
 
-		result = ses_recv_diag(sdev, 10, buf, len);
+		result = ses_recv_diag(sdev, page, buf, len);
 		if (result)
 			goto recv_failed;
 		ses_dev->page10 = buf;
 		ses_dev->page10_len = len;
 		buf = NULL;
 	}
+page2_not_supported:
 	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
 	if (!scomp)
 		goto err_free;
@@ -734,7 +775,7 @@ static int ses_intf_add(struct device *cdev,
 
  recv_failed:
 	sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
-		    result);
+		    page);
 	err = -ENODEV;
  err_free:
 	kfree(buf);
@@ -777,8 +818,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	if (!edev)
 		return;
 
-	enclosure_unregister(edev);
-
 	ses_dev = edev->scratch;
 	edev->scratch = NULL;
 
@@ -790,6 +829,7 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	kfree(edev->component[0].scratch);
 
 	put_device(&edev->edev);
+	enclosure_unregister(edev);
 }
 
 static void ses_intf_remove(struct device *cdev,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 84e782d8e7c3f0cb8dd4c3bdedb46b7060b18f1e..cf0e71db9e5146068294ab46fb1db1fa69addbc9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1081,8 +1081,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		return blk_trace_setup(sdp->device->request_queue,
 				       sdp->disk->disk_name,
 				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
-				       NULL,
-				       (char *)arg);
+				       NULL, p);
 	case BLKTRACESTART:
 		return blk_trace_startstop(sdp->device->request_queue, 1);
 	case BLKTRACESTOP:
@@ -1233,6 +1232,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	unsigned long req_sz, len, sa;
 	Sg_scatter_hold *rsv_schp;
 	int k, length;
+	int ret = 0;
 
 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
 		return -ENXIO;
@@ -1243,8 +1243,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (vma->vm_pgoff)
 		return -EINVAL;	/* want no offset */
 	rsv_schp = &sfp->reserve;
-	if (req_sz > rsv_schp->bufflen)
-		return -ENOMEM;	/* cannot map more than reserved buffer */
+	mutex_lock(&sfp->f_mutex);
+	if (req_sz > rsv_schp->bufflen) {
+		ret = -ENOMEM;	/* cannot map more than reserved buffer */
+		goto out;
+	}
 
 	sa = vma->vm_start;
 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1258,7 +1261,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
-	return 0;
+out:
+	mutex_unlock(&sfp->f_mutex);
+	return ret;
 }
 
 static void
@@ -1735,9 +1740,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 		    !sfp->res_in_use) {
 			sfp->res_in_use = 1;
 			sg_link_reserve(sfp, srp, dxfer_len);
-		} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
+		} else if (hp->flags & SG_FLAG_MMAP_IO) {
+			res = -EBUSY; /* sfp->res_in_use == 1 */
+			if (dxfer_len > rsv_schp->bufflen)
+				res = -ENOMEM;
 			mutex_unlock(&sfp->f_mutex);
-			return -EBUSY;
+			return res;
 		} else {
 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
 			if (res) {
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 80cfa93e407c3aab151c0165196dc5e8751b5c1f..5ed696dc9bbd53a5bf9cd063b6e3915069bde473 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -192,20 +192,6 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
 	hcp->desc.pnext = hdata->dma;
 }
 
-static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
-{
-	/* FIXME perform bus-specific reset */
-
-	/* FIXME 2: kill this function, and let midlayer fallback
-	   to the same result, calling wd33c93_host_reset() */
-
-	spin_lock_irq(cmd->device->host->host_lock);
-	wd33c93_host_reset(cmd);
-	spin_unlock_irq(cmd->device->host->host_lock);
-
-	return SUCCESS;
-}
-
 /*
  * Kludge alert - the SCSI code calls the abort and reset method with int
  * arguments not with pointers.  So this is going to blow up beautyfully
@@ -217,7 +203,6 @@ static struct scsi_host_template sgiwd93_template = {
 	.name			= "SGI WD93",
 	.queuecommand		= wd33c93_queuecommand,
 	.eh_abort_handler	= wd33c93_abort,
-	.eh_bus_reset_handler	= sgiwd93_bus_reset,
 	.eh_host_reset_handler	= wd33c93_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e164ffade38a7166690a9ecb206be1165e62278c..dc3a0542a2e83b161bd3ea44b41f450d9e48c7a9 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -688,6 +688,28 @@ struct pqi_config_table_heartbeat {
 	__le32	heartbeat_counter;
 };
 
+union pqi_reset_register {
+	struct {
+		u32	reset_type : 3;
+		u32	reserved : 2;
+		u32	reset_action : 3;
+		u32	hold_in_pd1 : 1;
+		u32	reserved2 : 23;
+	} bits;
+	u32	all_bits;
+};
+
+#define PQI_RESET_ACTION_RESET		0x1
+
+#define PQI_RESET_TYPE_NO_RESET		0x0
+#define PQI_RESET_TYPE_SOFT_RESET	0x1
+#define PQI_RESET_TYPE_FIRM_RESET	0x2
+#define PQI_RESET_TYPE_HARD_RESET	0x3
+
+#define PQI_RESET_ACTION_COMPLETED	0x2
+
+#define PQI_RESET_POLL_INTERVAL_MSECS	100
+
 #define PQI_MAX_OUTSTANDING_REQUESTS		((u32)~0)
 #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP	32
 #define PQI_MAX_TRANSFER_SIZE			(1024U * 1024U)
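
union pqi_reset_register lets the driver compose a reset request field by field and write it out as a single 32-bit value. A hedged sketch of composing a soft reset with the constants defined above; which iomem register it is written to is an assumption, so the target address is left as a parameter:

	#include <linux/io.h>

	/* Hypothetical helper; only the union and the PQI_RESET_* constants come
	 * from smartpqi.h. */
	static void demo_request_soft_reset(void __iomem *reset_reg_addr)
	{
		union pqi_reset_register reset_reg;

		reset_reg.all_bits = 0;
		reset_reg.bits.reset_type = PQI_RESET_TYPE_SOFT_RESET;
		reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
		writel(reset_reg.all_bits, reset_reg_addr);
	}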
@@ -995,6 +1017,7 @@ struct pqi_ctrl_info {
 	u8		inbound_spanning_supported : 1;
 	u8		outbound_spanning_supported : 1;
 	u8		pqi_mode_enabled : 1;
+	u8		pqi_reset_quiesce_supported : 1;
 
 	struct list_head scsi_device_list;
 	spinlock_t	scsi_device_list_lock;
@@ -1056,9 +1079,9 @@ enum pqi_ctrl_mode {
 #define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
 #define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
 #define BMIC_WRITE_HOST_WELLNESS		0xa5
-#define BMIC_CACHE_FLUSH			0xc2
+#define BMIC_FLUSH_CACHE			0xc2
 
-#define SA_CACHE_FLUSH				0x1
+#define SA_FLUSH_CACHE				0x1
 
 #define MASKED_DEVICE(lunid)			((lunid)[3] & 0xc0)
 #define CISS_GET_LEVEL_2_BUS(lunid)		((lunid)[7] & 0x3f)
@@ -1164,6 +1187,23 @@ struct bmic_identify_physical_device {
 	u8	padding_to_multiple_of_512[9];
 };
 
+struct bmic_flush_cache {
+	u8	disable_flag;
+	u8	system_power_action;
+	u8	ndu_flush;
+	u8	shutdown_event;
+	u8	reserved[28];
+};
+
+/* for shutdown_event member of struct bmic_flush_cache */
+enum bmic_flush_cache_shutdown_event {
+	NONE_CACHE_FLUSH_ONLY = 0,
+	SHUTDOWN = 1,
+	HIBERNATE = 2,
+	SUSPEND = 3,
+	RESTART = 4
+};
+
 #pragma pack()
 
 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index cb8f886e705cbd3e0881752a41787be033c405b2..83bdbd84eb01f427e9d74d56526b2e1f1ceb9e33 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"1.0.4-100"
+#define DRIVER_VERSION		"1.1.2-125"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		0
-#define DRIVER_RELEASE		4
-#define DRIVER_REVISION		100
+#define DRIVER_MINOR		1
+#define DRIVER_RELEASE		2
+#define DRIVER_REVISION		125
 
 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -431,10 +431,10 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 		cdb[1] = CISS_GET_RAID_MAP;
 		put_unaligned_be32(buffer_length, &cdb[6]);
 		break;
-	case SA_CACHE_FLUSH:
+	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
-		cdb[6] = BMIC_CACHE_FLUSH;
+		cdb[6] = BMIC_FLUSH_CACHE;
 		put_unaligned_be16(buffer_length, &cdb[7]);
 		break;
 	case BMIC_IDENTIFY_CONTROLLER:
@@ -585,14 +585,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 	return rc;
 }
 
-#define SA_CACHE_FLUSH_BUFFER_LENGTH	4
-
-static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
+static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
+	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
 	struct pqi_raid_path_request request;
 	int pci_direction;
-	u8 *buffer;
+	struct bmic_flush_cache *flush_cache;
 
 	/*
 	 * Don't bother trying to flush the cache if the controller is
@@ -601,13 +600,15 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
 	if (pqi_ctrl_offline(ctrl_info))
 		return -ENXIO;
 
-	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
-	if (!buffer)
+	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
+	if (!flush_cache)
 		return -ENOMEM;
 
+	flush_cache->shutdown_event = shutdown_event;
+
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
-		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
+		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
+		sizeof(*flush_cache), 0, &pci_direction);
 	if (rc)
 		goto out;
 
@@ -618,7 +619,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
 		pci_direction);
 
 out:
-	kfree(buffer);
+	kfree(flush_cache);
 
 	return rc;
 }
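
pqi_flush_cache() now fills a full struct bmic_flush_cache instead of the opaque 4-byte buffer, so callers can tell the firmware why the cache is being flushed. The later hunks use it roughly like this (a sketch of the two call sites, nothing more; RESTART and HIBERNATE are available for other paths):

	/* reboot/power-off path */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);

	/* PM suspend path */
	rc = pqi_flush_cache(ctrl_info, SUSPEND);
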
@@ -3007,11 +3008,9 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
 			break;
 		case IRQ_MODE_INTX:
 			pqi_configure_legacy_intx(ctrl_info, true);
-			sis_disable_msix(ctrl_info);
 			sis_enable_intx(ctrl_info);
 			break;
 		case IRQ_MODE_NONE:
-			sis_disable_msix(ctrl_info);
 			break;
 		}
 		break;
@@ -3019,14 +3018,12 @@ static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
 		switch (new_mode) {
 		case IRQ_MODE_MSIX:
 			pqi_configure_legacy_intx(ctrl_info, false);
-			sis_disable_intx(ctrl_info);
 			sis_enable_msix(ctrl_info);
 			break;
 		case IRQ_MODE_INTX:
 			break;
 		case IRQ_MODE_NONE:
 			pqi_configure_legacy_intx(ctrl_info, false);
-			sis_disable_intx(ctrl_info);
 			break;
 		}
 		break;
@@ -5498,6 +5495,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 	case XFER_NONE:
 	case XFER_WRITE:
 	case XFER_READ:
+	case XFER_READ | XFER_WRITE:
 		break;
 	default:
 		return -EINVAL;
@@ -5538,6 +5536,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 	case XFER_READ:
 		request.data_direction = SOP_READ_FLAG;
 		break;
+	case XFER_READ | XFER_WRITE:
+		request.data_direction = SOP_BIDIRECTIONAL;
+		break;
 	}
 
 	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
@@ -5889,28 +5890,62 @@ static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
 	scsi_host_put(shost);
 }
 
-#define PQI_RESET_ACTION_RESET		0x1
+static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc = 0;
+	struct pqi_device_registers __iomem *pqi_registers;
+	unsigned long timeout;
+	unsigned int timeout_msecs;
+	union pqi_reset_register reset_reg;
 
-#define PQI_RESET_TYPE_NO_RESET		0x0
-#define PQI_RESET_TYPE_SOFT_RESET	0x1
-#define PQI_RESET_TYPE_FIRM_RESET	0x2
-#define PQI_RESET_TYPE_HARD_RESET	0x3
+	pqi_registers = ctrl_info->pqi_registers;
+	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
+	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
+
+	while (1) {
+		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
+		reset_reg.all_bits = readl(&pqi_registers->device_reset);
+		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
+			break;
+		pqi_check_ctrl_health(ctrl_info);
+		if (pqi_ctrl_offline(ctrl_info)) {
+			rc = -ENXIO;
+			break;
+		}
+		if (time_after(jiffies, timeout)) {
+			rc = -ETIMEDOUT;
+			break;
+		}
+	}
+
+	return rc;
+}
 
 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	u32 reset_params;
+	union pqi_reset_register reset_reg;
+
+	if (ctrl_info->pqi_reset_quiesce_supported) {
+		rc = sis_pqi_reset_quiesce(ctrl_info);
+		if (rc) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"PQI reset failed during quiesce with error %d\n",
+				rc);
+			return rc;
+		}
+	}
 
-	reset_params = (PQI_RESET_ACTION_RESET << 5) |
-		PQI_RESET_TYPE_HARD_RESET;
+	reset_reg.all_bits = 0;
+	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
+	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
 
-	writel(reset_params,
-		&ctrl_info->pqi_registers->device_reset);
+	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
 
-	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
 	if (rc)
 		dev_err(&ctrl_info->pci_dev->dev,
-			"PQI reset failed\n");
+			"PQI reset failed with error %d\n", rc);
 
 	return rc;
 }
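
pqi_wait_for_pqi_reset_completion() polls the reset register rather than waiting for PQI mode to come back: the controller's max_reset_timeout register appears to be in 100 ms units (hence the * 100), the loop sleeps PQI_RESET_POLL_INTERVAL_MSECS between reads, and it gives up early if the controller goes offline. The same poll-until-done-or-timeout shape, reduced to a sketch (my_poll_done() is a placeholder, not a driver API; timeout_msecs comes from the register read above):

	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_msecs);
	int rc = 0;

	for (;;) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
		if (my_poll_done())		/* placeholder condition */
			break;
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}
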
@@ -6007,7 +6042,12 @@ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
 	rc = pqi_reset(ctrl_info);
 	if (rc)
 		return rc;
-	sis_reenable_sis_mode(ctrl_info);
+	rc = sis_reenable_sis_mode(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"re-enabling SIS mode failed with error %d\n", rc);
+		return rc;
+	}
 	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 
 	return 0;
@@ -6659,7 +6699,8 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
 	 * Write all data in the controller's battery-backed cache to
 	 * storage.
 	 */
-	rc = pqi_flush_cache(ctrl_info);
+	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+	pqi_reset(ctrl_info);
 	if (rc == 0)
 		return;
 
@@ -6703,7 +6744,7 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
 	pqi_cancel_rescan_worker(ctrl_info);
 	pqi_wait_until_scan_finished(ctrl_info);
 	pqi_wait_until_lun_reset_finished(ctrl_info);
-	pqi_flush_cache(ctrl_info);
+	pqi_flush_cache(ctrl_info, SUSPEND);
 	pqi_ctrl_block_requests(ctrl_info);
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_wait_until_inbound_queues_empty(ctrl_info);
@@ -6781,7 +6822,7 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_ADAPTEC2, 0x0605)
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0608)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6811,6 +6852,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0806)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
@@ -6847,6 +6892,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0908)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x090a)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x1200)
@@ -6879,6 +6928,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_DELL, 0x1fe0)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x0600)
@@ -6897,11 +6950,7 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0604)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0606)
+			       PCI_VENDOR_ID_HP, 0x0609)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6927,14 +6976,6 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x0655)
 	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0656)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x0657)
-	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x0700)
@@ -6955,14 +6996,6 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x1101)
 	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x1102)
-	},
-	{
-		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
-			       PCI_VENDOR_ID_HP, 0x1150)
-	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)
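
The PCI id table churn above only adds and retires subsystem ids; every entry matches the same vendor/device pair (Adaptec2 0x028f) plus one subsystem vendor/device, and the PCI_ANY_ID entry that closes the table catches any other 0x028f-based board. A hedged sketch of the shape of one entry (the 0x1234 subsystem id is made up for illustration):

	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1234)	/* hypothetical id */
	},
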
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 0d89d3728b435f392c893ee1925e47e40cfda582..b209a35e482ef87348a5ea6a7554443742077655 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -329,14 +329,6 @@ static int pqi_sas_phy_speed(struct sas_phy *phy,
 	return -EINVAL;
 }
 
-/* SMP = Serial Management Protocol */
-
-static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-	struct request *req)
-{
-	return -EINVAL;
-}
-
 struct sas_function_template pqi_sas_transport_functions = {
 	.get_linkerrors = pqi_sas_get_linkerrors,
 	.get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
@@ -346,5 +338,4 @@ struct sas_function_template pqi_sas_transport_functions = {
 	.phy_setup = pqi_sas_phy_setup,
 	.phy_release = pqi_sas_phy_release,
 	.set_phy_speed = pqi_sas_phy_speed,
-	.smp_handler = pqi_sas_smp_handler,
 };
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index e55dfcf200e5969526b33ab4b2bbfb470f328fcc..5141bd4c9f06135e5e3a5dc30277200a112dc998 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -34,11 +34,13 @@
 #define SIS_REENABLE_SIS_MODE			0x1
 #define SIS_ENABLE_MSIX				0x40
 #define SIS_ENABLE_INTX				0x80
-#define SIS_SOFT_RESET				0x100
-#define SIS_TRIGGER_SHUTDOWN			0x800000
 #define SIS_CMD_READY				0x200
+#define SIS_TRIGGER_SHUTDOWN			0x800000
+#define SIS_PQI_RESET_QUIESCE			0x1000000
+
 #define SIS_CMD_COMPLETE			0x1000
 #define SIS_CLEAR_CTRL_TO_HOST_DOORBELL		0x1000
+
 #define SIS_CMD_STATUS_SUCCESS			0x1
 #define SIS_CMD_COMPLETE_TIMEOUT_SECS		30
 #define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS	10
@@ -47,6 +49,7 @@
 #define SIS_EXTENDED_PROPERTIES_SUPPORTED	0x800000
 #define SIS_SMARTARRAY_FEATURES_SUPPORTED	0x2
 #define SIS_PQI_MODE_SUPPORTED			0x4
+#define SIS_PQI_RESET_QUIESCE_SUPPORTED		0x8
 #define SIS_REQUIRED_EXTENDED_PROPERTIES	\
 	(SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED)
 
@@ -258,6 +261,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info)
 		SIS_REQUIRED_EXTENDED_PROPERTIES)
 		return -ENODEV;
 
+	if (extended_properties & SIS_PQI_RESET_QUIESCE_SUPPORTED)
+		ctrl_info->pqi_reset_quiesce_supported = true;
+
 	return 0;
 }
 
@@ -336,9 +342,10 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
 
 #define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS	30
 
-static void sis_wait_for_doorbell_bit_to_clear(
+static int sis_wait_for_doorbell_bit_to_clear(
 	struct pqi_ctrl_info *ctrl_info, u32 bit)
 {
+	int rc = 0;
 	u32 doorbell_register;
 	unsigned long timeout;
 
@@ -350,78 +357,38 @@ static void sis_wait_for_doorbell_bit_to_clear(
 		if ((doorbell_register & bit) == 0)
 			break;
 		if (readl(&ctrl_info->registers->sis_firmware_status) &
-			SIS_CTRL_KERNEL_PANIC)
+			SIS_CTRL_KERNEL_PANIC) {
+			rc = -ENODEV;
 			break;
+		}
 		if (time_after(jiffies, timeout)) {
 			dev_err(&ctrl_info->pci_dev->dev,
 				"doorbell register bit 0x%x not cleared\n",
 				bit);
+			rc = -ETIMEDOUT;
 			break;
 		}
 		usleep_range(1000, 2000);
 	}
-}
-
-/* Enable MSI-X interrupts on the controller. */
-
-void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register |= SIS_ENABLE_MSIX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
 
-	sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
+	return rc;
 }
 
-/* Disable MSI-X interrupts on the controller. */
-
-void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
+static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
 {
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register &= ~SIS_ENABLE_MSIX;
+	writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
 
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+	return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
 }
 
-void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
 {
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register |= SIS_ENABLE_INTX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-
-	sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+	sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_MSIX);
 }
 
-void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 doorbell_register;
-
-	doorbell_register =
-		readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-	doorbell_register &= ~SIS_ENABLE_INTX;
-
-	writel(doorbell_register,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
-}
-
-void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
 {
-	writel(SIS_SOFT_RESET,
-		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+	sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX);
 }
 
 void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
@@ -434,38 +401,14 @@ void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
 		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
 }
 
-#define SIS_MODE_READY_TIMEOUT_SECS	30
+int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	return sis_set_doorbell_bit(ctrl_info, SIS_PQI_RESET_QUIESCE);
+}
 
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
 {
-	int rc;
-	unsigned long timeout;
-	struct pqi_ctrl_registers __iomem *registers;
-	u32 doorbell;
-
-	registers = ctrl_info->registers;
-
-	writel(SIS_REENABLE_SIS_MODE,
-		&registers->sis_host_to_ctrl_doorbell);
-
-	rc = 0;
-	timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
-
-	while (1) {
-		doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
-		if ((doorbell & SIS_REENABLE_SIS_MODE) == 0)
-			break;
-		if (time_after(jiffies, timeout)) {
-			rc = -ETIMEDOUT;
-			break;
-		}
-	}
-
-	if (rc)
-		dev_err(&ctrl_info->pci_dev->dev,
-			"re-enabling SIS mode failed\n");
-
-	return rc;
+	return sis_set_doorbell_bit(ctrl_info, SIS_REENABLE_SIS_MODE);
 }
 
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 983184b693735b1ad2fe3eb706b70d2b6f4e02e9..2bf889dbf5abaf2e4796118125ae3f335df2c429 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -27,11 +27,9 @@ int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
 int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
 int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
-void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
 void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_disable_intx(struct pqi_ctrl_info *ctrl_info);
-void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
+int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a8f630213a1a0fce250e23b00a9452676389b159..9be34d37c3567169770563304dd4a927e22b4bc9 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -393,7 +393,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
-	SCpnt = rq->special;
+	WARN_ON_ONCE(SCpnt != rq->special);
 	cd = scsi_cd(rq->rq_disk);
 
 	/* from here on until we're complete, any goto out
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3cc8d67783a1d1ab53d1acfdc2a25caa9baa6a47..5e7200f0587364c8de771a56d4c7105c8f2400c6 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1640,6 +1640,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	put_cpu();
 
 	if (ret == -EAGAIN) {
+		if (payload_sz > sizeof(cmd_request->mpb))
+			kfree(payload);
 		/* no more space */
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
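
The storvsc change plugs a leak on the -EAGAIN path: payload starts out pointing at the embedded cmd_request->mpb buffer and is only replaced by a separate allocation when the scatterlist needs a larger multi-page buffer array, so only that case may be kfree()d. A reduced sketch of the pairing, with the size decision abbreviated (needs_large_array, large_array_size and my_send() are placeholders):

	payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
	payload_sz = sizeof(cmd_request->mpb);

	if (needs_large_array) {
		payload_sz = large_array_size;
		payload = kzalloc(payload_sz, GFP_ATOMIC);
		if (!payload)
			return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	ret = my_send(payload);
	if (ret == -EAGAIN) {
		if (payload_sz > sizeof(cmd_request->mpb))
			kfree(payload);		/* only the separate allocation */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}
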
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e64b0c542f950aae912a4615f8578a73df31ce6e..9492638296c86183d01bae23e5e5cabbac6a2f10 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -46,7 +46,7 @@
 #define NCR5380_write(reg, value)       out_8(hostdata->io + (reg), value)
 
 #define NCR5380_queue_command           sun3scsi_queue_command
-#define NCR5380_bus_reset               sun3scsi_bus_reset
+#define NCR5380_host_reset              sun3scsi_host_reset
 #define NCR5380_abort                   sun3scsi_abort
 #define NCR5380_info                    sun3scsi_info
 
@@ -495,7 +495,7 @@ static struct scsi_host_template sun3_scsi_template = {
 	.info			= sun3scsi_info,
 	.queuecommand		= sun3scsi_queue_command,
 	.eh_abort_handler	= sun3scsi_abort,
-	.eh_bus_reset_handler	= sun3scsi_bus_reset,
+	.eh_host_reset_handler	= sun3scsi_host_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_NONE,
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 6b349e3018692435b3523f8d0cef4e4daaf6f3ef..15ff285a9f8f746ee7946e65f4f6264a5a524f67 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -759,7 +759,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru
 	/*
 	 * Maximum synchronous period factor supported by the chip.
 	 */
-	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+	period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz);
 	np->maxsync = period > 2540 ? 254 : period / 10;
 
 	/*
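
The maxsync calculation above now goes through div64_ul() from <linux/math64.h>, which divides a 64-bit dividend by an unsigned long divisor without leaning on the compiler's 64-bit division helpers on 32-bit builds (and, presumably, keeps the 11 * div_10M[] product out of 32-bit overflow territory). A minimal usage sketch, not driver code:

	#include <linux/math64.h>

	u64 total_ns = 123456789012ULL;
	unsigned long per_tick = 4000;
	u64 ticks = div64_ul(total_ns, per_tick);	/* safe on 32-bit kernels */
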
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5bc9dc14e075060aaacfba82a82ed21da08274f3..794a4600e952a4222bf22c596ae584f520dccfdb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -438,7 +438,7 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
 	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 9be211d68b1596b4a9b482fa4e17888b6f559e39..7c28e8d4955a965805cdde3d2ddb07a7f8c5d6ea 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -818,7 +818,6 @@ static struct scsi_host_template virtscsi_host_template_single = {
 	.eh_timed_out = virtscsi_eh_timed_out,
 	.slave_alloc = virtscsi_device_alloc,
 
-	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
 	.target_alloc = virtscsi_target_alloc,
@@ -839,7 +838,6 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.eh_timed_out = virtscsi_eh_timed_out,
 	.slave_alloc = virtscsi_device_alloc,
 
-	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
 	.target_alloc = virtscsi_target_alloc,
@@ -972,6 +970,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	if (err)
 		goto virtscsi_init_failed;
 
+	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
+
 	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
 	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
 	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
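
Instead of the hard-coded can_queue of 1024 removed from both host templates, the probe path now sizes the queue depth from the request virtqueue, so the midlayer can never have more commands outstanding than the ring has descriptors; cmd_per_lun is then clamped to it in the context just above. The shape of the change, as a sketch:

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
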
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 9e09da412b92299c4f903764e4c0cebf71a5b845..74be04f2357c0f9ca9110bf6f8b05f147b4370b0 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1578,6 +1578,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt)
 	int i;
 
 	instance = SCpnt->device->host;
+	spin_lock_irq(instance->host_lock);
 	hostdata = (struct WD33C93_hostdata *) instance->hostdata;
 
 	printk("scsi%d: reset. ", instance->host_no);
@@ -1603,6 +1604,7 @@ wd33c93_host_reset(struct scsi_cmnd * SCpnt)
 	reset_wd33c93(instance);
 	SCpnt->result = DID_RESET << 16;
 	enable_irq(instance->irq);
+	spin_unlock_irq(instance->host_lock);
 	return SUCCESS;
 }
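
With the per-board bus-reset wrappers going away in this series (sgiwd93_bus_reset above was one of them), the host-lock acquisition they used to provide moves into wd33c93_host_reset() itself, since the midlayer calls EH handlers without the host lock held. A minimal hedged sketch of the same pattern for some other driver (all names hypothetical):

	static int my_eh_host_reset(struct scsi_cmnd *cmd)
	{
		struct Scsi_Host *host = cmd->device->host;

		spin_lock_irq(host->host_lock);
		my_board_reset(host);		/* placeholder for the hardware reset */
		spin_unlock_irq(host->host_lock);

		return SUCCESS;
	}
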
 
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 53748d61e20e7fb20877b4569a9b68c7caa66699..89e2cfe7d1cc0ffdef1334ca73a59729ca69f70b 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -205,16 +205,6 @@ static int device_reset(struct scsi_cmnd *srb)
 	return SUCCESS;
 }
 
-/* Simulate a SCSI bus reset by resetting the device's USB port. */
-static int bus_reset(struct scsi_cmnd *srb)
-{
-	struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
-
-	dev_info(&dev->pci->dev, "%s called\n", __func__);
-
-	return SUCCESS;
-}
-
 /*
  * this defines our host template, with which we'll allocate hosts
  */
@@ -231,7 +221,6 @@ static struct scsi_host_template rtsx_host_template = {
 	/* error and abort handlers */
 	.eh_abort_handler =		command_abort,
 	.eh_device_reset_handler =	device_reset,
-	.eh_bus_reset_handler =		bus_reset,
 
 	/* queue commands only, only one command per LUN */
 	.can_queue =			1,
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 8567e447891e8434c6ba5a9e7197ff56b47abaaa..419dba89af065b431d7923cbad0de4c309262efa 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -47,9 +47,8 @@ MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
 MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
 
 struct visordisk_info {
+	struct scsi_device *sdev;
 	u32 valid;
-	/* Disk Path */
-	u32 channel, id, lun;
 	atomic_t ios_threshold;
 	atomic_t error_count;
 	struct visordisk_info *next;
@@ -105,12 +104,6 @@ struct visorhba_devices_open {
 	struct visorhba_devdata *devdata;
 };
 
-#define for_each_vdisk_match(iter, list, match) \
-	for (iter = &list->head; iter->next; iter = iter->next) \
-		if ((iter->channel == match->channel) && \
-		    (iter->id == match->id) && \
-		    (iter->lun == match->lun))
-
 /*
  * visor_thread_start - Starts a thread for the device
  * @threadfn:   Function the thread starts
@@ -313,10 +306,9 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
  * Return: Int representing whether command was queued successfully or not
  */
 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
-				    struct scsi_cmnd *scsicmd)
+				    struct scsi_device *scsidev)
 {
 	struct uiscmdrsp *cmdrsp;
-	struct scsi_device *scsidev = scsicmd->device;
 	struct visorhba_devdata *devdata =
 		(struct visorhba_devdata *)scsidev->host->hostdata;
 	int notifyresult = 0xffff;
@@ -364,12 +356,6 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
 	dev_dbg(&scsidev->sdev_gendev,
 		"visorhba: taskmgmt type=%d success; result=0x%x\n",
 		 tasktype, notifyresult);
-	if (tasktype == TASK_MGMT_ABORT_TASK)
-		scsicmd->result = DID_ABORT << 16;
-	else
-		scsicmd->result = DID_RESET << 16;
-
-	scsicmd->scsi_done(scsicmd);
 	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
 	return SUCCESS;
 
@@ -392,17 +378,20 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
 	/* issue TASK_MGMT_ABORT_TASK */
 	struct scsi_device *scsidev;
 	struct visordisk_info *vdisk;
-	struct visorhba_devdata *devdata;
+	int rtn;
 
 	scsidev = scsicmd->device;
-	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-	for_each_vdisk_match(vdisk, devdata, scsidev) {
-		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-			atomic_inc(&vdisk->error_count);
-		else
-			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+	vdisk = scsidev->hostdata;
+	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
+		atomic_inc(&vdisk->error_count);
+	else
+		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+	rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
+	if (rtn == SUCCESS) {
+		scsicmd->result = DID_ABORT << 16;
+		scsicmd->scsi_done(scsicmd);
 	}
-	return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
+	return rtn;
 }
 
 /*
@@ -416,17 +405,20 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
 	/* issue TASK_MGMT_LUN_RESET */
 	struct scsi_device *scsidev;
 	struct visordisk_info *vdisk;
-	struct visorhba_devdata *devdata;
+	int rtn;
 
 	scsidev = scsicmd->device;
-	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-	for_each_vdisk_match(vdisk, devdata, scsidev) {
-		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-			atomic_inc(&vdisk->error_count);
-		else
-			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+	vdisk = scsidev->hostdata;
+	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
+		atomic_inc(&vdisk->error_count);
+	else
+		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+	rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
+	if (rtn == SUCCESS) {
+		scsicmd->result = DID_RESET << 16;
+		scsicmd->scsi_done(scsicmd);
 	}
-	return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
+	return rtn;
 }
 
 /*
@@ -440,17 +432,22 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
 {
 	struct scsi_device *scsidev;
 	struct visordisk_info *vdisk;
-	struct visorhba_devdata *devdata;
+	int rtn;
 
 	scsidev = scsicmd->device;
-	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-	for_each_vdisk_match(vdisk, devdata, scsidev) {
+	shost_for_each_device(scsidev, scsidev->host) {
+		vdisk = scsidev->hostdata;
 		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
 			atomic_inc(&vdisk->error_count);
 		else
 			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 	}
-	return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
+	rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
+	if (rtn == SUCCESS) {
+		scsicmd->result = DID_RESET << 16;
+		scsicmd->scsi_done(scsicmd);
+	}
+	return rtn;
 }
 
 /*
@@ -604,27 +601,24 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev)
 	 * LLD can alloc any struct & do init if needed.
 	 */
 	struct visordisk_info *vdisk;
-	struct visordisk_info *tmpvdisk;
 	struct visorhba_devdata *devdata;
 	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
 
+	/* already allocated, return success */
+	if (scsidev->hostdata)
+		return 0;
+
 	/* even though we errored, treat as success */
 	devdata = (struct visorhba_devdata *)scsihost->hostdata;
 	if (!devdata)
 		return 0;
 
-	/* already allocated return success */
-	for_each_vdisk_match(vdisk, devdata, scsidev)
-		return 0;
-
-	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
-	if (!tmpvdisk)
+	vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
+	if (!vdisk)
 		return -ENOMEM;
 
-	tmpvdisk->channel = scsidev->channel;
-	tmpvdisk->id = scsidev->id;
-	tmpvdisk->lun = scsidev->lun;
-	vdisk->next = tmpvdisk;
+	vdisk->sdev = scsidev;
+	scsidev->hostdata = vdisk;
 	return 0;
 }
 
@@ -637,17 +631,11 @@ static void visorhba_slave_destroy(struct scsi_device *scsidev)
 	/* midlevel calls this after device has been quiesced and
 	 * before it is to be deleted.
 	 */
-	struct visordisk_info *vdisk, *delvdisk;
-	struct visorhba_devdata *devdata;
-	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
+	struct visordisk_info *vdisk;
 
-	devdata = (struct visorhba_devdata *)scsihost->hostdata;
-	for_each_vdisk_match(vdisk, devdata, scsidev) {
-		delvdisk = vdisk->next;
-		vdisk->next = delvdisk->next;
-		kfree(delvdisk);
-		return;
-	}
+	vdisk = scsidev->hostdata;
+	scsidev->hostdata = NULL;
+	kfree(vdisk);
 }
 
 static struct scsi_host_template visorhba_driver_template = {
@@ -823,7 +811,6 @@ static int visorhba_serverdown(struct visorhba_devdata *devdata)
 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
 			      struct scsi_cmnd *scsicmd)
 {
-	struct visorhba_devdata *devdata;
 	struct visordisk_info *vdisk;
 	struct scsi_device *scsidev;
 
@@ -836,12 +823,10 @@ static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
 	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
 		return;
 	/* Okay see what our error_count is here.... */
-	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-	for_each_vdisk_match(vdisk, devdata, scsidev) {
-		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
-			atomic_inc(&vdisk->error_count);
-			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-		}
+	vdisk = scsidev->hostdata;
+	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
+		atomic_inc(&vdisk->error_count);
+		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
 	}
 }
 
@@ -881,7 +866,6 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
 	char *this_page_orig;
 	int bufind = 0;
 	struct visordisk_info *vdisk;
-	struct visorhba_devdata *devdata;
 
 	scsidev = scsicmd->device;
 	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
@@ -918,13 +902,11 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
 		}
 		kfree(buf);
 	} else {
-		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-		for_each_vdisk_match(vdisk, devdata, scsidev) {
-			if (atomic_read(&vdisk->ios_threshold) > 0) {
-				atomic_dec(&vdisk->ios_threshold);
-				if (atomic_read(&vdisk->ios_threshold) == 0)
-					atomic_set(&vdisk->error_count, 0);
-			}
+		vdisk = scsidev->hostdata;
+		if (atomic_read(&vdisk->ios_threshold) > 0) {
+			atomic_dec(&vdisk->ios_threshold);
+			if (atomic_read(&vdisk->ios_threshold) == 0)
+				atomic_set(&vdisk->error_count, 0);
 		}
 	}
 }
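
The visorhba rework above retires the driver's private visordisk_info list (and its channel/id/lun matching macro) in favour of the midlayer's per-device hostdata pointer: slave_alloc allocates the structure, slave_destroy frees it, and anything already holding a scsi_device reaches it with one dereference; the bus-reset handler walks all devices with shost_for_each_device() instead. The general idiom, sketched with hypothetical names:

	struct my_disk_info {			/* hypothetical per-device state */
		struct scsi_device *sdev;
	};

	static int my_slave_alloc(struct scsi_device *sdev)
	{
		struct my_disk_info *di;

		if (sdev->hostdata)
			return 0;		/* already set up */
		di = kzalloc(sizeof(*di), GFP_KERNEL);
		if (!di)
			return -ENOMEM;
		di->sdev = sdev;
		sdev->hostdata = di;
		return 0;
	}

	static void my_slave_destroy(struct scsi_device *sdev)
	{
		kfree(sdev->hostdata);
		sdev->hostdata = NULL;
	}
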
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 5ef014ba6ae86917f34e6ebbd855845e3bf71732..cfb1e3bbd434715410e1f78914bb6ac3eb004b76 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -737,7 +737,7 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
 	return FAILED;
 }
 
-static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
+static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
 {
 	struct scsi_device *sdev = cmnd->device;
 	struct uas_dev_info *devinfo = sdev->hostdata;
@@ -848,7 +848,7 @@ static struct scsi_host_template uas_host_template = {
 	.slave_alloc = uas_slave_alloc,
 	.slave_configure = uas_slave_configure,
 	.eh_abort_handler = uas_eh_abort_handler,
-	.eh_bus_reset_handler = uas_eh_bus_reset_handler,
+	.eh_device_reset_handler = uas_eh_device_reset_handler,
 	.this_id = -1,
 	.sg_tablesize = SG_NONE,
 	.skip_settle_delay = 1,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 9aaa177e8209731cbe63aef3c7f1479ab2497851..eb30f3e09a4775b3f046ccc698f18a05210f17ad 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -296,7 +296,6 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 	}
 #endif
 
-	BUG_ON(total_sg > vq->vring.num);
 	BUG_ON(total_sg == 0);
 
 	head = vq->free_head;
@@ -305,8 +304,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 	 * buffers, then go indirect. FIXME: tune this threshold */
 	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
 		desc = alloc_indirect(_vq, total_sg, gfp);
-	else
+	else {
 		desc = NULL;
+		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
+	}
 
 	if (desc) {
 		/* Use a single buffer which doesn't continue */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 637a20cfb237db4ab76a480172e918e6e48f849b..b1be0233ce353d93241bf1d1776d901637e9b95a 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -68,8 +68,9 @@ struct bsg_job {
 
 void bsg_job_done(struct bsg_job *job, int result,
 		  unsigned int reply_payload_rcv_len);
-struct request_queue *bsg_setup_queue(struct device *dev, char *name,
-		bsg_job_fn *job_fn, int dd_job_size);
+struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+		bsg_job_fn *job_fn, int dd_job_size,
+		void (*release)(struct device *));
 void bsg_job_put(struct bsg_job *job);
 int __must_check bsg_job_get(struct bsg_job *job);
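
bsg_setup_queue() grows a const-qualified name and a release callback that bsg invokes against the parent device when the queue is torn down; callers that don't need a hook can pass NULL. A hedged sketch of a call site (my_bsg_job_fn is a placeholder; in this era the function returns an ERR_PTR on failure):

	struct request_queue *q;

	q = bsg_setup_queue(dev, dev_name(dev), my_bsg_job_fn, 0, NULL);
	if (IS_ERR(q))
		return PTR_ERR(q);
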
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 96f1baf62ab8cf72216a97b5df7eb3fd386cbc3d..de50d8a4cf414121a9f83e50df7738a5f530494a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -412,6 +412,22 @@ static inline void rcu_preempt_sleep_check(void) { }
 	_r_a_p__v;							      \
 })
 
+/**
+ * rcu_swap_protected() - swap an RCU and a regular pointer
+ * @rcu_ptr: RCU pointer
+ * @ptr: regular pointer
+ * @c: the conditions under which the dereference will take place
+ *
+ * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
+ * @c is the argument that is passed to the rcu_dereference_protected() call
+ * used to read that pointer.
+ */
+#define rcu_swap_protected(rcu_ptr, ptr, c) do {			\
+	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
+	rcu_assign_pointer((rcu_ptr), (ptr));				\
+	(ptr) = __tmp;							\
+} while (0)
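
rcu_swap_protected() publishes a new value of an RCU-protected pointer and hands the old value back through @ptr in one step, which is exactly the shape the scsi_device VPD pointers later in this series need: swap in the freshly read page under the lock that protects updates, then free the old buffer after a grace period. A hedged sketch against those fields (vpd_buf is assumed to be a freshly filled struct scsi_vpd *):

	mutex_lock(&sdev->inquiry_mutex);
	rcu_swap_protected(sdev->vpd_pg83, vpd_buf,
			   lockdep_is_held(&sdev->inquiry_mutex));
	mutex_unlock(&sdev->inquiry_mutex);

	if (vpd_buf)
		kfree_rcu(vpd_buf, rcu);	/* vpd_buf now holds the old page */
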
+
 /**
  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
  * @p: The pointer to read
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index cfaeed256ab23789dac204674b030b5e175bc647..6c0dc6155ee757b5e066b075316dc899f6dddddc 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -714,16 +714,13 @@ void sas_init_dev(struct domain_device *);
 void sas_task_abort(struct sas_task *);
 int sas_eh_abort_handler(struct scsi_cmnd *cmd);
 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
-int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd);
+int sas_eh_target_reset_handler(struct scsi_cmnd *cmd);
 
 extern void sas_target_destroy(struct scsi_target *);
 extern int sas_slave_alloc(struct scsi_device *);
 extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
 extern int sas_drain_work(struct sas_ha_struct *ha);
 
-extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
-			   struct request *req);
-
 extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
 				  struct ssp_response_iu *iu);
 struct sas_phy *sas_get_local_phy(struct domain_device *dev);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 6af198d8120b098f31d674f15584ec7114832743..3d3f8b342e05d14e76d912251a36d6111ab4da88 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -58,6 +58,9 @@ struct scsi_pointer {
 #define SCMD_TAGGED		(1 << 0)
 #define SCMD_UNCHECKED_ISA_DMA	(1 << 1)
 #define SCMD_ZONE_WRITE_LOCK	(1 << 2)
+#define SCMD_INITIALIZED	(1 << 3)
+/* flags preserved across unprep / reprep */
+#define SCMD_PRESERVED_FLAGS	(SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
 
 struct scsi_cmnd {
 	struct scsi_request req;
@@ -159,7 +162,6 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
 }
 
-extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 0979a5f3b69a30ce384db82728fb28ad6a136a6d..82e93ee94708c9f7de073b3e6c47826664525075 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -80,6 +80,18 @@ struct scsi_event {
 	 */
 };
 
+/**
+ * struct scsi_vpd - SCSI Vital Product Data
+ * @rcu: For kfree_rcu().
+ * @len: Length in bytes of @data.
+ * @data: VPD data as defined in various T10 SCSI standard documents.
+ */
+struct scsi_vpd {
+	struct rcu_head	rcu;
+	int		len;
+	unsigned char	data[];
+};
+
 struct scsi_device {
 	struct Scsi_Host *host;
 	struct request_queue *request_queue;
@@ -111,7 +123,7 @@ struct scsi_device {
 	unsigned sector_size;	/* size in bytes */
 
 	void *hostdata;		/* available to low-level driver */
-	char type;
+	unsigned char type;
 	char scsi_level;
 	char inq_periph_qual;	/* PQ from INQUIRY data */	
 	struct mutex inquiry_mutex;
@@ -122,10 +134,8 @@ struct scsi_device {
 	const char * rev;		/* ... "nullnullnullnull" before scan */
 
 #define SCSI_VPD_PG_LEN                255
-	int vpd_pg83_len;
-	unsigned char __rcu *vpd_pg83;
-	int vpd_pg80_len;
-	unsigned char __rcu *vpd_pg80;
+	struct scsi_vpd __rcu *vpd_pg83;
+	struct scsi_vpd __rcu *vpd_pg80;
 	unsigned char current_tag;	/* current tag */
 	struct scsi_target      *sdev_target;   /* used only for single_lun */
 
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 4416b1026189c182bb75fbc5f677f587007522a7..5b416debf101c091874fcd4c94a1ef21433d4abf 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -39,7 +39,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
 
 	if (!req)
 		return NULL;
-	return req->special;
+	return blk_mq_rq_to_pdu(req);
 }
 
 #endif /* CONFIG_BLOCK */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index e308cd59e5561944ed12b9f5a94b90957377b285..e8644eea9fe56441d16ba78a8ea50f5b35d86f07 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -804,6 +804,7 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
 struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
 		struct fc_vport_identifiers *);
 int fc_vport_terminate(struct fc_vport *vport);
+int fc_block_rport(struct fc_rport *rport);
 int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
 enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
 
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 73d870918939e082295a90bd9a092814dd97131b..a23304b7fb2e6837003a0d51297f4ebc9b3f0e52 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <scsi/sas.h>
+#include <linux/bsg-lib.h>
 
 struct scsi_transport_template;
 struct sas_rphy;
@@ -176,7 +177,8 @@ struct sas_function_template {
 	int (*phy_setup)(struct sas_phy *);
 	void (*phy_release)(struct sas_phy *);
 	int (*set_phy_speed)(struct sas_phy *, struct sas_phy_linkrates *);
-	int (*smp_handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
+	void (*smp_handler)(struct bsg_job *, struct Scsi_Host *,
+			struct sas_rphy *);
 };
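
The SAS transport's smp_handler now takes a bsg_job instead of a raw struct request and completes it via bsg_job_done() rather than returning an errno; the request and response frames arrive as the job's request_payload and reply_payload scatterlists. A hedged skeleton (my_do_smp() stands in for the LLDD's actual SMP exchange):

	static void my_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
				   struct sas_rphy *rphy)
	{
		unsigned int rcv_len = 0;
		int ret;

		ret = my_do_smp(shost, rphy,
				job->request_payload.sg_list,
				job->reply_payload.sg_list, &rcv_len);

		bsg_job_done(job, ret, rcv_len);
	}
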
 
 
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index dd096330734e3656829776f05ab20d02ba9a8d73..56ae198acc7378cffe6e8e1cb1c5019ee91c47c1 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -111,7 +111,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
 				       struct srp_rport_identifiers *);
 extern void srp_rport_del(struct srp_rport *);
 extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
-			 int dev_loss_tmo);
+			 long dev_loss_tmo);
 int srp_parse_tmo(int *tmo, const char *buf);
 extern int srp_reconnect_rport(struct srp_rport *rport);
 extern void srp_start_tl_fail_timers(struct srp_rport *rport);