From 1b708cfd74c66f501ea0e0c48287596489272da9 Mon Sep 17 00:00:00 2001 From: haodongdong Date: Thu, 12 Dec 2024 14:00:31 +0800 Subject: [PATCH] scsi: leapioraid: supports LEAPIO RAID controller This commit is to support LeapIO LEAPIO RAID controllers. RAID controllers support RAID 0/1/5/6/10/50/60 modes; RAID support SAS/SATA HDD/SSD. Signed-off-by: haodongdong --- .../configs/deepin_arm64_desktop_defconfig | 1 + .../deepin_loongarch_desktop_defconfig | 1 + arch/x86/configs/deepin_x86_desktop_defconfig | 1 + drivers/scsi/Kconfig | 1 + drivers/scsi/Makefile | 1 + drivers/scsi/leapioraid/Kconfig | 13 + drivers/scsi/leapioraid/Makefile | 9 + drivers/scsi/leapioraid/leapioraid.h | 2026 ++++ drivers/scsi/leapioraid/leapioraid_app.c | 2226 ++++ drivers/scsi/leapioraid/leapioraid_func.c | 7074 ++++++++++++ drivers/scsi/leapioraid/leapioraid_func.h | 1258 +++ drivers/scsi/leapioraid/leapioraid_os.c | 9823 +++++++++++++++++ .../scsi/leapioraid/leapioraid_transport.c | 1926 ++++ 13 files changed, 24360 insertions(+) create mode 100644 drivers/scsi/leapioraid/Kconfig create mode 100644 drivers/scsi/leapioraid/Makefile create mode 100644 drivers/scsi/leapioraid/leapioraid.h create mode 100644 drivers/scsi/leapioraid/leapioraid_app.c create mode 100644 drivers/scsi/leapioraid/leapioraid_func.c create mode 100644 drivers/scsi/leapioraid/leapioraid_func.h create mode 100644 drivers/scsi/leapioraid/leapioraid_os.c create mode 100644 drivers/scsi/leapioraid/leapioraid_transport.c diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index d3cb3f572b41..0a1e6cdcce91 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1047,6 +1047,7 @@ CONFIG_MEGARAID_LEGACY=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT2SAS=m CONFIG_SCSI_MPI3MR=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m diff --git 
a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index f1217d6eae34..7d9703c00c94 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -993,6 +993,7 @@ CONFIG_MEGARAID_LEGACY=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT2SAS=m CONFIG_SCSI_MPI3MR=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index 88ada2c0bf78..facc22869cd2 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -978,6 +978,7 @@ CONFIG_MEGARAID_LEGACY=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT2SAS=m CONFIG_SCSI_MPI3MR=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 64703d52f23f..7cd6cc869d97 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -489,6 +489,7 @@ source "drivers/scsi/esas2r/Kconfig" source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/mpt3sas/Kconfig" source "drivers/scsi/mpi3mr/Kconfig" +source "drivers/scsi/leapioraid/Kconfig" source "drivers/scsi/smartpqi/Kconfig" config SCSI_HPTIOP diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 4f1984eec5e7..a824ca7fdf15 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -100,6 +100,7 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/ obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/ +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o obj-$(CONFIG_SCSI_INITIO) += initio.o diff --git a/drivers/scsi/leapioraid/Kconfig b/drivers/scsi/leapioraid/Kconfig new file mode 100644 index 
000000000000..a309d530284b --- /dev/null +++ b/drivers/scsi/leapioraid/Kconfig @@ -0,0 +1,13 @@ +# +# Kernel configuration file for the LEAPIORAID +# + +config SCSI_LEAPIORAID + tristate "LeapIO RAID Adapter" + depends on PCI && SCSI + select SCSI_SAS_ATTRS + select RAID_ATTRS + select IRQ_POLL + help + This driver supports LEAPIO RAID controller, which supports PCI Express Gen4 interface + and supports SAS/SATA HDD/SSD. diff --git a/drivers/scsi/leapioraid/Makefile b/drivers/scsi/leapioraid/Makefile new file mode 100644 index 000000000000..1a3786a56cb7 --- /dev/null +++ b/drivers/scsi/leapioraid/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the LEAPIORAID drivers. +# + +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid.o +leapioraid-objs += leapioraid_func.o \ + leapioraid_os.o \ + leapioraid_transport.o \ + leapioraid_app.o diff --git a/drivers/scsi/leapioraid/leapioraid.h b/drivers/scsi/leapioraid/leapioraid.h new file mode 100644 index 000000000000..917459d0012b --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid.h @@ -0,0 +1,2026 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * Copyright (C) 2024 LeapIO Tech Inc. 
+ * + */ + +#ifndef LEAPIORAID_H +#define LEAPIORAID_H + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __aligned(4); + +#define LEAPIORAID_IOC_STATE_RESET (0x00000000) +#define LEAPIORAID_IOC_STATE_READY (0x10000000) +#define LEAPIORAID_IOC_STATE_OPERATIONAL (0x20000000) +#define LEAPIORAID_IOC_STATE_FAULT (0x40000000) +#define LEAPIORAID_IOC_STATE_COREDUMP (0x50000000) +#define LEAPIORAID_IOC_STATE_MASK (0xF0000000) + +struct LeapioraidSysInterfaceRegs_t { + U32 Doorbell; + U32 WriteSequence; + U32 HostDiagnostic; + U32 Reserved1; + U32 DiagRWData; + U32 DiagRWAddressLow; + U32 DiagRWAddressHigh; + U32 Reserved2[5]; + U32 HostInterruptStatus; + U32 HostInterruptMask; + U32 DCRData; + U32 DCRAddress; + U32 Reserved3[2]; + U32 ReplyFreeHostIndex; + U32 Reserved4[8]; + U32 ReplyPostHostIndex; + U32 Reserved5; + U32 HCBSize; + U32 HCBAddressLow; + U32 HCBAddressHigh; + U32 Reserved6[12]; + U32 Scratchpad[4]; + U32 RequestDescriptorPostLow; + U32 RequestDescriptorPostHigh; + U32 AtomicRequestDescriptorPost; + U32 IocLogBufPosition; + U32 HostLogBufPosition; + U32 Reserved7[11]; +}; + +#define LEAPIORAID_DOORBELL_USED (0x08000000) +#define LEAPIORAID_DOORBELL_DATA_MASK (0x0000FFFF) +#define LEAPIORAID_DOORBELL_FUNCTION_SHIFT (24) +#define LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT (16) + +#define LEAPIORAID_DIAG_RESET_ADAPTER (0x00000004) + +#define LEAPIORAID_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define LEAPIORAID_HIS_IOC2SYS_DB_STATUS (0x00000001) + +#define LEAPIORAID_RPHI_MSIX_INDEX_SHIFT (24) + +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) + +struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DescriptorTypeDependent; +}; + +struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 
RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 Reserved1; +}; + +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DevHandle; +}; + +typedef +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR; + +union LeapioraidReqDescUnion_t { + struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR Default; + struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + U64 Words; +}; + +struct LeapioraidAtomicReqDesc_t { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; +}; + +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +struct LeapioraidDefaultRepDesc_t { + U8 ReplyFlags; + U8 MSIxIndex; + U16 DescriptorTypeDependent1; + U32 DescriptorTypeDependent2; +}; + +struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + U32 ReplyFrameAddress; +}; + +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + U16 TaskTag; + U16 Reserved1; +}; + +typedef +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR; + +union LeapioraidRepDescUnion_t { + struct LeapioraidDefaultRepDesc_t Default; + struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR AddressReply; + struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + U64 Words; +}; + +#define LEAPIORAID_FUNC_SCSI_IO_REQUEST (0x00) +#define LEAPIORAID_FUNC_SCSI_TASK_MGMT (0x01) +#define LEAPIORAID_FUNC_IOC_INIT (0x02) +#define LEAPIORAID_FUNC_IOC_FACTS (0x03) +#define 
LEAPIORAID_FUNC_CONFIG (0x04) +#define LEAPIORAID_FUNC_PORT_FACTS (0x05) +#define LEAPIORAID_FUNC_PORT_ENABLE (0x06) +#define LEAPIORAID_FUNC_EVENT_NOTIFICATION (0x07) +#define LEAPIORAID_FUNC_EVENT_ACK (0x08) +#define LEAPIORAID_FUNC_FW_DOWNLOAD (0x09) +#define LEAPIORAID_FUNC_FW_UPLOAD (0x12) +#define LEAPIORAID_FUNC_RAID_ACTION (0x15) +#define LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define LEAPIORAID_FUNC_SMP_PASSTHROUGH (0x1A) +#define LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_SATA_PASSTHROUGH (0x1C) +#define LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET (0x40) +#define LEAPIORAID_FUNC_HANDSHAKE (0x42) +#define LEAPIORAID_FUNC_LOG_INIT (0x57) + +#define LEAPIORAID_IOCSTATUS_MASK (0x7FFF) +#define LEAPIORAID_IOCSTATUS_SUCCESS (0x0000) +#define LEAPIORAID_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define LEAPIORAID_IOCSTATUS_BUSY (0x0002) +#define LEAPIORAID_IOCSTATUS_INVALID_SGL (0x0003) +#define LEAPIORAID_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define LEAPIORAID_IOCSTATUS_INVALID_VPID (0x0005) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define LEAPIORAID_IOCSTATUS_INVALID_FIELD (0x0007) +#define LEAPIORAID_IOCSTATUS_INVALID_STATE (0x0008) +#define LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER (0x000A) + +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +#define LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define 
LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +#define LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +#define LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define LEAPIORAID_IOCSTATUS_TARGET_ABORTED (0x0063) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +#define LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) +#define LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +struct LeapioraidReqHeader_t { + U16 FunctionDependent1; + U8 ChainOffset; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; +}; + +struct LeapioraidDefaultRep_t { + U16 FunctionDependent1; + U8 MsgLength; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 FunctionDependent5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct 
LEAPIORAID_VERSION_STRUCT { + U8 Dev; + U8 Unit; + U8 Minor; + U8 Major; +}; + +union LEAPIORAID_VERSION_UNION { + struct LEAPIORAID_VERSION_STRUCT Struct; + U32 Word; +}; + +struct LeapioSGESimple32_t { + U32 FlagsLength; + U32 Address; +}; + +struct LeapioSGESimple64_t { + U32 FlagsLength; + U64 Address; +}; + +struct LEAPIORAID_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +}; + +struct LEAPIORAID_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +}; + +#define LEAPIORAID_SGE_FLAGS_LAST_ELEMENT (0x80) +#define LEAPIORAID_SGE_FLAGS_END_OF_BUFFER (0x40) +#define LEAPIORAID_SGE_FLAGS_END_OF_LIST (0x01) +#define LEAPIORAID_SGE_FLAGS_SHIFT (24) +#define LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS (0x00) +#define LEAPIORAID_SGE_FLAGS_HOST_TO_IOC (0x04) +#define LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +struct LEAPIORAID_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +}; + +struct LEAPIORAID_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +}; + +union LEAPIORAID_IEEE_SGE_SIMPLE_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Simple32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Simple64; +}; + +union LEAPIORAID_IEEE_SGE_CHAIN_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Chain32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Chain64; +}; + +struct LEAPIORAID_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +}; + +union LEAPIORAID_IEEE_SGE_IO_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE64 IeeeSimple; + struct LEAPIORAID_IEEE_SGE_CHAIN64 IeeeChain; +}; + +#define LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST (0x40) +#define LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) +#define LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) + 
+union LEAPIORAID_SIMPLE_SGE_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; +}; + +union LEAPIORAID_SGE_IO_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + struct LEAPIORAID_SGE_CHAIN_UNION LeapioChain; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; + union LEAPIORAID_IEEE_SGE_CHAIN_UNION IeeeChain; +}; + +struct LEAPIORAID_CONFIG_PAGE_HEADER { + U8 PageVersion; + U8 PageLength; + U8 PageNumber; + U8 PageType; +}; + +struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; + U8 Reserved1; + U8 PageNumber; + U8 PageType; + U16 ExtPageLength; + U8 ExtPageType; + U8 Reserved2; +}; + +#define LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define LEAPIORAID_CONFIG_PAGETYPE_IOC (0x01) +#define LEAPIORAID_CONFIG_PAGETYPE_BIOS (0x02) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define LEAPIORAID_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define LEAPIORAID_CONFIG_PAGETYPE_MASK (0x0F) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_LOG (0x14) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) + +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) + +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL 
(0x00000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) +#define LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) +#define LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) + +struct LeapioraidCfgReq_t { + U8 Action; + U8 SGLFlags; + U8 ChainOffset; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 ProxyVF_ID; + U16 Reserved4; + U32 Reserved3; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 PageAddress; + union LEAPIORAID_SGE_IO_UNION PageBufferSGE; +}; + +#define LEAPIORAID_CONFIG_ACTION_PAGE_HEADER (0x00) +#define LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) + +struct LeapioraidCfgRep_t { + U8 Action; + U8 SGLFlags; + U8 MsgLength; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 Reserved2; + U16 IOCStatus; + U32 IOCLogInfo; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; +}; + +struct LeapioraidManP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 ChipName[16]; + U8 ChipRevision[8]; + U8 BoardName[16]; + U8 BoardAssembly[16]; + U8 BoardTracerNumber[16]; +}; + +struct LEAPIORAID_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; + U8 Connector[16]; + U8 Location; + U8 ReceptacleID; + U16 Slot; + U16 Slotx2; + U16 Slotx4; +}; + +struct LeapioraidIOUnitP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U64 UniqueValue; + union LEAPIORAID_VERSION_UNION NvdataVersionDefault; + union 
LEAPIORAID_VERSION_UNION NvdataVersionPersistent; +}; + +struct LeapioraidIOUnitP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; +}; + +#define LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) + +struct LEAPIORAID_IOUNIT8_SENSOR { + U16 Flags; + U16 Reserved1; + U16 Threshold[4]; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; +}; + +struct LeapioraidIOUnitP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U8 NumSensors; + U8 PollingInterval; + U16 Reserved3; + struct LEAPIORAID_IOUNIT8_SENSOR Sensor[]; +}; + +struct LeapioraidIOCP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; + U32 CoalescingTimeout; + U8 CoalescingDepth; + U8 PCISlotNum; + U8 PCIBusNum; + U8 PCIDomainSegment; + U32 Reserved1; + U32 ProductSpecific; +}; + +struct LeapioraidIOCP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 NumDevsPerEnclosure; + U8 Reserved1; + U16 Reserved2; + U16 MaxPersistentEntries; + U16 MaxNumPhysicalMappedIDs; + U16 Flags; + U16 Reserved3; + U16 IRVolumeMappingFlags; + U16 Reserved4; + U32 Reserved5; +}; + +#define LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) + +struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; +}; + +struct LEAPIORAID_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; + U32 Reserved1; + U32 Reserved2; + U16 SlotNumber; + U16 Reserved3; + U32 Reserved4; +}; + +struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE { + struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + struct 
LEAPIORAID_BOOT_DEVICE_SAS_WWID SasWwid; + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME DeviceName; +}; + +struct LeapioraidBiosP2_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; + U8 ReqBootDeviceForm; + U8 Reserved7; + U16 Reserved8; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; + U8 ReqAltBootDeviceForm; + U8 Reserved9; + U16 Reserved10; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; + U8 CurrentBootDeviceForm; + U8 Reserved11; + U16 Reserved12; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; +}; + +#define LEAPIORAID_BIOSPAGE2_FORM_MASK (0x0F) +#define LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + +struct LEAPIORAID_ADAPTER_INFO { + U8 PciBusNumber; + U8 PciDeviceAndFunctionNumber; + U16 AdapterFlags; +}; + +struct LEAPIORAID_ADAPTER_ORDER_AUX { + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct LeapioraidBiosP3_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 GlobalFlags; + U32 BiosVersion; + struct LEAPIORAID_ADAPTER_INFO AdapterOrder[4]; + U32 Reserved1; + struct LEAPIORAID_ADAPTER_ORDER_AUX AdapterOrderAux[4]; +}; + +struct LEAPIORAID_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; + U8 PhysDiskMap; + U8 PhysDiskNum; + U8 Reserved; +}; + +struct LEAPIORAID_RAIDVOL0_SETTINGS { + U16 Settings; + U8 HotSparePool; + U8 Reserved; +}; + +struct LeapioraidRaidVolP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 VolumeState; + U8 VolumeType; + U32 VolumeStatusFlags; + struct LEAPIORAID_RAIDVOL0_SETTINGS VolumeSettings; + U64 MaxLBA; + U32 StripeSize; + U16 BlockSize; + U16 Reserved1; + U8 SupportedPhysDisks; + U8 ResyncRate; + U16 DataScrubDuration; + U8 
NumPhysDisks; + U8 Reserved2; + U8 Reserved3; + U8 InactiveStatus; + struct LEAPIORAID_RAIDVOL0_PHYS_DISK PhysDisk[]; +}; + +#define LEAPIORAID_RAID_VOL_STATE_MISSING (0x00) +#define LEAPIORAID_RAID_VOL_STATE_FAILED (0x01) +#define LEAPIORAID_RAID_VOL_STATE_INITIALIZING (0x02) +#define LEAPIORAID_RAID_VOL_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_VOL_STATE_DEGRADED (0x04) +#define LEAPIORAID_RAID_VOL_STATE_OPTIMAL (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_RAID0 (0x00) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1E (0x01) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1 (0x02) +#define LEAPIORAID_RAID_VOL_TYPE_RAID10 (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_UNKNOWN (0xFF) + +#define LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) + +struct LeapioraidRaidVolP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U16 Reserved0; + U8 GUID[24]; + U8 Name[16]; + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; + U8 HotSparePool; + U8 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; + U8 ProductID[16]; + U8 ProductRevLevel[4]; + U8 SerialNum[32]; +}; + +struct LeapioraidRaidPDP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 Reserved1; + U8 PhysDiskNum; + struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; + U32 Reserved2; + struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; + U32 Reserved3; + U8 PhysDiskState; + U8 OfflineReason; + U8 IncompatibleReason; + U8 PhysDiskAttributes; + U32 PhysDiskStatusFlags; + U64 DeviceMaxLBA; + U64 HostMaxLBA; + U64 CoercedMaxLBA; + U16 BlockSize; + U16 Reserved5; + U32 Reserved6; +}; + +#define LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define LEAPIORAID_RAID_PD_STATE_OFFLINE (0x02) +#define LEAPIORAID_RAID_PD_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_PD_STATE_HOT_SPARE (0x04) +#define LEAPIORAID_RAID_PD_STATE_DEGRADED 
(0x05) +#define LEAPIORAID_RAID_PD_STATE_REBUILDING (0x06) +#define LEAPIORAID_RAID_PD_STATE_OPTIMAL (0x07) + +#define LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +#define LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define LEAPIORAID_SAS_NEG_LINK_RATE_1_5 (0x08) +#define LEAPIORAID_SAS_NEG_LINK_RATE_3_0 (0x09) +#define LEAPIORAID_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define LEAPIORAID_SAS_NEG_LINK_RATE_12_0 (0x0B) + +#define LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define LEAPIORAID_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK (0x0F) + +struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA { + U8 Port; + U8 PortFlags; + U8 PhyFlags; + U8 NegotiatedLinkRate; + U32 ControllerPhyDeviceInfo; + U16 AttachedDevHandle; + U16 ControllerDevHandle; + U32 DiscoveryStatus; + U32 Reserved; +}; + +struct LeapioraidSasIOUnitP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U8 NumPhys; + U8 Reserved2; + U16 Reserved3; + struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA PhyData[]; +}; + +#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) +#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA { + U8 Port; + U8 PortFlags; + U8 PhyFlags; + U8 MaxMinLinkRate; + U32 ControllerPhyDeviceInfo; + U16 MaxTargetPortConnectTime; + U16 Reserved1; +}; + +struct LeapioraidSasIOUnitP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 ControlFlags; + U16 SASNarrowMaxQueueDepth; + U16 AdditionalControlFlags; + U16 
SASWideMaxQueueDepth; + U8 NumPhys; + U8 SATAMaxQDepth; + U8 ReportDeviceMissingDelay; + U8 IODeviceMissingDelay; + struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA PhyData[]; +}; + +#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) +#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +struct LeapioraidExpanderP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 PhysicalPort; + U8 ReportGenLength; + U16 EnclosureHandle; + U64 SASAddress; + U32 DiscoveryStatus; + U16 DevHandle; + U16 ParentDevHandle; + U16 ExpanderChangeCount; + U16 ExpanderRouteIndexes; + U8 NumPhys; + U8 SASLevel; + U16 Flags; + U16 STPBusInactivityTimeLimit; + U16 STPMaxConnectTimeLimit; + U16 STP_SMP_NexusLossTime; + U16 MaxNumRoutedSasAddresses; + U64 ActiveZoneManagerSASAddress; + U16 ZoneLockInactivityLimit; + U16 Reserved1; + U8 TimeToReducedFunc; + U8 InitialTimeToReducedFunc; + U8 MaxReducedFuncTime; + U8 Reserved2; +}; + +struct LeapioraidExpanderP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 PhysicalPort; + U8 Reserved1; + U16 Reserved2; + U8 NumPhys; + U8 Phy; + U16 NumTableEntriesProgrammed; + U8 ProgrammedLinkRate; + U8 HwLinkRate; + U16 AttachedDevHandle; + U32 PhyInfo; + U32 AttachedDeviceInfo; + U16 ExpanderDevHandle; + U8 ChangeCount; + U8 NegotiatedLinkRate; + U8 PhyIdentifier; + U8 AttachedPhyIdentifier; + U8 Reserved3; + U8 DiscoveryInfo; + U32 AttachedPhyInfo; + U8 ZoneGroup; + U8 SelfConfigStatus; + U16 Reserved4; +}; + +struct LeapioraidSasDevP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 Slot; + U16 EnclosureHandle; + U64 SASAddress; + U16 ParentDevHandle; + U8 PhyNum; + U8 AccessStatus; + U16 DevHandle; + U8 AttachedPhyIdentifier; + U8 ZoneGroup; + U32 DeviceInfo; + U16 Flags; + U8 PhysicalPort; + U8 MaxPortConnections; + U64 DeviceName; + U8 PortGroups; + U8 DmaGroup; + U8 
ControlGroup; + U8 EnclosureLevel; + U8 ConnectorName[4]; + U32 Reserved3; +}; + +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + +struct LeapioraidSasPhyP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U16 OwnerDevHandle; + U16 Reserved1; + U16 AttachedDevHandle; + U8 
AttachedPhyIdentifier; + U8 Reserved2; + U32 AttachedPhyInfo; + U8 ProgrammedLinkRate; + U8 HwLinkRate; + U8 ChangeCount; + U8 Flags; + U32 PhyInfo; + U8 NegotiatedLinkRate; + U8 Reserved3; + U16 Reserved4; +}; + +struct LeapioraidSasPhyP1_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U32 InvalidDwordCount; + U32 RunningDisparityErrorCount; + U32 LossDwordSynchCount; + U32 PhyResetProblemCount; +}; + +struct LeapioraidSasEncP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U32 Reserved1; + U64 EnclosureLogicalID; + U16 Flags; + U16 EnclosureHandle; + U16 NumSlots; + U16 StartSlot; + U8 ChassisSlot; + U8 EnclosureLevel; + U16 SEPDevHandle; + U8 OEMRD; + U8 Reserved1a; + U16 Reserved2; + U32 Reserved3; +}; + +#define LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) + +struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; + U16 VolDevHandle; + U8 HotSparePool; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; +}; + +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + +struct LeapioraidRaidCfgP0_t { + struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; + U8 NumHotSpares; + U8 NumPhysDisks; + U8 NumVolumes; + U8 ConfigNum; + U32 Flags; + U8 ConfigGUID[24]; + U32 Reserved1; + U8 NumElements; + U8 Reserved2; + U16 Reserved3; + struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[]; +}; + +struct LeapioraidFWImgHeader_t { + U32 Signature; + U32 Signature0; + U32 Signature1; + U32 Signature2; + union LEAPIORAID_VERSION_UNION LEAPIOVersion; + union LEAPIORAID_VERSION_UNION FWVersion; + union LEAPIORAID_VERSION_UNION NVDATAVersion; + union LEAPIORAID_VERSION_UNION PackageVersion; + U16 VendorID; + U16 ProductID; + U16 ProtocolFlags; + U16 Reserved26; + U32 IOCCapabilities; + U32 ImageSize; + U32 
NextImageHeaderOffset; + U32 Checksum; + U32 Reserved38; + U32 Reserved3C; + U32 Reserved40; + U32 Reserved44; + U32 Reserved48; + U32 Reserved4C; + U32 Reserved50; + U32 Reserved54; + U32 Reserved58; + U32 Reserved5C; + U32 BootFlags; + U32 FirmwareVersionNameWhat; + U8 FirmwareVersionName[32]; + U32 VendorNameWhat; + U8 VendorName[32]; + U32 PackageNameWhat; + U8 PackageName[32]; + U32 ReservedD0; + U32 ReservedD4; + U32 ReservedD8; + U32 ReservedDC; + U32 ReservedE0; + U32 ReservedE4; + U32 ReservedE8; + U32 ReservedEC; + U32 ReservedF0; + U32 ReservedF4; + U32 ReservedF8; + U32 ReservedFC; +}; + +struct LEAPIORAID_HASH_EXCLUSION_FORMAT { + U32 Offset; + U32 Size; +}; + +struct LeapioraidComptImgHeader_t { + U32 Signature0; + U32 LoadAddress; + U32 DataSize; + U32 StartAddress; + U32 Signature1; + U32 FlashOffset; + U32 FlashSize; + U32 VersionStringOffset; + U32 BuildDateStringOffset; + U32 BuildTimeStringOffset; + U32 EnvironmentVariableOffset; + U32 ApplicationSpecific; + U32 Signature2; + U32 HeaderSize; + U32 Crc; + U8 NotFlashImage; + U8 Compressed; + U16 Reserved3E; + U32 SecondaryFlashOffset; + U32 Reserved44; + U32 Reserved48; + union LEAPIORAID_VERSION_UNION RMCInterfaceVersion; + union LEAPIORAID_VERSION_UNION Reserved50; + union LEAPIORAID_VERSION_UNION FWVersion; + union LEAPIORAID_VERSION_UNION NvdataVersion; + struct LEAPIORAID_HASH_EXCLUSION_FORMAT HashExclusion[4]; + U32 NextImageHeaderOffset; + U32 Reserved80[32]; +}; + +struct LEAPIORAID_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; + __be32 PrimaryReferenceTag; + U16 PrimaryApplicationTag; + U16 PrimaryApplicationTagMask; + U32 TransferLength; +}; + +union LEAPIO_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32; + struct LEAPIORAID_SGE_SIMPLE_UNION SGE; +}; + +struct LeapioSCSIIOReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U32 SenseBufferLowAddress; + U16 SGLFlags; + 
U8 SenseBufferLength; + U8 Reserved4; + U8 SGLOffset0; + U8 SGLOffset1; + U8 SGLOffset2; + U8 SGLOffset3; + U32 SkipCount; + U32 DataLength; + U32 BidirectionalDataLength; + U16 IoFlags; + U16 EEDPFlags; + U32 EEDPBlockSize; + U32 SecondaryReferenceTag; + U16 SecondaryApplicationTag; + U16 ApplicationTagTranslationMask; + U8 LUN[8]; + U32 Control; + union LEAPIO_SCSI_IO_CDB_UNION CDB; + union LEAPIORAID_SGE_IO_UNION SGL; +}; + +#define LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) + +#define LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) +#define LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define LEAPIORAID_SCSIIO_CONTROL_WRITE (0x01000000) +#define LEAPIORAID_SCSIIO_CONTROL_READ (0x02000000) +#define LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) +#define LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT (11) +#define LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define LEAPIORAID_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define LEAPIORAID_SCSIIO_CONTROL_TLR_ON (0x00000040) + +union LEAPIORAID_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 SGE; +}; + +struct LeapioraidSCSIIOReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U32 SenseBufferLowAddress; + U8 DMAFlags; + U8 Reserved5; + U8 SenseBufferLength; + U8 Reserved4; + U8 SGLOffset0; + U8 SGLOffset1; + U8 SGLOffset2; + U8 SGLOffset3; + U32 SkipCount; + U32 DataLength; + U32 BidirectionalDataLength; + U16 IoFlags; + U16 EEDPFlags; + U16 EEDPBlockSize; + U16 Reserved6; + U32 SecondaryReferenceTag; + U16 SecondaryApplicationTag; + U16 ApplicationTagTranslationMask; + U8 LUN[8]; + U32 Control; + union LEAPIORAID_SCSI_IO_CDB_UNION CDB; + union LEAPIORAID_IEEE_SGE_IO_UNION SGL; +}; + +struct LeapioraidSCSIIORep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U16 Reserved1; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + 
U16 Reserved3; + U8 SCSIStatus; + U8 SCSIState; + U16 IOCStatus; + U32 IOCLogInfo; + U32 TransferCount; + U32 SenseCount; + U32 ResponseInfo; + U16 TaskTag; + U16 SCSIStatusQualifier; + U32 BidirectionalTransferCount; + U32 EEDPErrorOffset; + U16 EEDPObservedAppTag; + U16 EEDPObservedGuard; + U32 EEDPObservedRefTag; +}; + +#define LEAPIORAID_SCSI_STATUS_GOOD (0x00) +#define LEAPIORAID_SCSI_STATUS_CHECK_CONDITION (0x02) +#define LEAPIORAID_SCSI_STATUS_CONDITION_MET (0x04) +#define LEAPIORAID_SCSI_STATUS_BUSY (0x08) +#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE (0x10) +#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED (0x22) +#define LEAPIORAID_SCSI_STATUS_TASK_SET_FULL (0x28) +#define LEAPIORAID_SCSI_STATUS_ACA_ACTIVE (0x30) +#define LEAPIORAID_SCSI_STATUS_TASK_ABORTED (0x40) +#define LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define LEAPIORAID_SCSI_STATE_TERMINATED (0x08) +#define LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID (0x01) + +struct LeapioraidSCSITmgReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U8 Reserved1; + U8 TaskType; + U8 Reserved2; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U8 LUN[8]; + U32 Reserved4[7]; + U16 TaskMID; + U16 Reserved5; +}; + +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) + +struct LeapioraidSCSITmgRep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U8 ResponseCode; + U8 TaskType; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + 
U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U32 TerminationCount; + U32 ResponseInfo; +}; + +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +struct LeapioraidSepReq_t { + U16 DevHandle; + U8 ChainOffset; + U8 Function; + U8 Action; + U8 Flags; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U32 SlotStatus; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U16 Slot; + U16 EnclosureHandle; +}; + +#define LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) +#define LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) + +struct LeapioraidSepRep_t { + U16 DevHandle; + U8 MsgLength; + U8 Function; + U8 Action; + U8 Flags; + U8 Reserved1; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U32 SlotStatus; + U32 Reserved4; + U16 Slot; + U16 EnclosureHandle; +}; + +struct LeapioraidIOCInitReq_t { + U8 WhoInit; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 MsgVersion; + U16 HeaderVersion; + U32 Reserved5; + U16 ConfigurationFlags; + U8 HostPageSize; + U8 HostMSIxVectors; + U16 Reserved8; + U16 SystemRequestFrameSize; + U16 ReplyDescriptorPostQueueDepth; + U16 ReplyFreeQueueDepth; + U32 SenseBufferAddressHigh; + U32 SystemReplyAddressHigh; + U64 SystemRequestFrameBaseAddress; + U64 ReplyDescriptorPostQueueAddress; + U64 ReplyFreeQueueAddress; + U64 TimeStamp; +}; + +#define LEAPIORAID_WHOINIT_HOST_DRIVER (0x04) 
+#define LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + +struct LeapioraidIOCInitRDPQArrayEntry { + U64 RDPQBaseAddress; + U32 Reserved1; + U32 Reserved2; +}; + +struct LeapioraidIOCInitRep_t { + U8 WhoInit; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LeapioraidIOCLogReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U64 BufAddr; + U32 BufSize; +}; + +struct LeapioraidIOCLogRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LeapioraidIOCFactsReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; +}; + +struct LeapioraidIOCFactsRep_t { + U16 MsgVersion; + U8 MsgLength; + U8 Function; + U16 HeaderVersion; + U8 IOCNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 IOCExceptions; + U16 IOCStatus; + U32 IOCLogInfo; + U8 MaxChainDepth; + U8 WhoInit; + U8 NumberOfPorts; + U8 MaxMSIxVectors; + U16 RequestCredit; + U16 ProductID; + U32 IOCCapabilities; + union LEAPIORAID_VERSION_UNION FWVersion; + U16 IOCRequestFrameSize; + U16 IOCMaxChainSegmentSize; + U16 MaxInitiators; + U16 MaxTargets; + U16 MaxSasExpanders; + U16 MaxEnclosures; + U16 ProtocolFlags; + U16 HighPriorityCredit; + U16 MaxReplyDescriptorPostQueueDepth; + U8 ReplyFrameSize; + U8 MaxVolumes; + U16 MaxDevHandle; + U16 MaxPersistentEntries; + U16 MinDevHandle; + U8 CurrentHostPageSize; + U8 Reserved4; + U8 SGEModifierMask; + U8 SGEModifierValue; + U8 SGEModifierShift; + U8 Reserved5; +}; + +#define LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) +#define 
LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define LEAPIORAID_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define LEAPIORAID_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) +#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + +struct LeapioraidPortFactsReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 PortNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; +}; + +struct LeapioraidPortFactsRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 PortNumber; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; + U8 Reserved5; + U8 PortType; + U16 Reserved6; + U16 MaxPostedCmdBuffers; + U16 Reserved7; +}; + +struct LeapioraidPortEnableReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U8 Reserved2; + U8 PortFlags; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; +}; + +struct LeapioraidPortEnableRep_t { + U16 Reserved1; + U8 MsgLength; + U8 Function; + U8 Reserved2; + U8 PortFlags; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +#define LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS (4) +struct LeapioraidEventNotificationReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U32 Reserved5; + U32 Reserved6; + U32 EventMasks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; + U16 
SASBroadcastPrimitiveMasks; + U16 SASNotifyPrimitiveMasks; + U32 Reserved8; +}; + +struct LeapioraidEventNotificationRep_t { + U16 EventDataLength; + U8 MsgLength; + U8 Function; + U16 Reserved1; + U8 AckRequired; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + U16 Event; + U16 Reserved4; + U32 EventContext; + U32 EventData[]; +}; + +#define LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) +#define LEAPIORAID_EVENT_LOG_DATA (0x0001) +#define LEAPIORAID_EVENT_STATE_CHANGE (0x0002) +#define LEAPIORAID_EVENT_HARD_RESET_RECEIVED (0x0005) +#define LEAPIORAID_EVENT_EVENT_CHANGE (0x000A) +#define LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define LEAPIORAID_EVENT_IR_OPERATION_STATUS (0x0014) +#define LEAPIORAID_EVENT_SAS_DISCOVERY (0x0016) +#define LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define LEAPIORAID_EVENT_IR_VOLUME (0x001E) +#define LEAPIORAID_EVENT_IR_PHYSICAL_DISK (0x001F) +#define LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define LEAPIORAID_EVENT_LOG_ENTRY_ADDED (0x0021) +#define LEAPIORAID_EVENT_SAS_QUIESCE (0x0025) +#define LEAPIORAID_EVENT_TEMP_THRESHOLD (0x0027) +#define LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) + +struct LeapioraidEventDataSasDeviceStatusChange_t { + U16 TaskTag; + U8 ReasonCode; + U8 PhysicalPort; + U8 ASC; + U8 ASCQ; + U16 DevHandle; + U32 Reserved2; + U64 SASAddress; + U8 LUN[8]; +}; + +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define 
LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +struct LeapioraidEventDataIrOpStatus_t { + U16 VolDevHandle; + U16 Reserved1; + U8 RAIDOperation; + U8 PercentComplete; + U16 Reserved2; + U32 ElapsedSeconds; +}; + +#define LEAPIORAID_EVENT_IR_RAIDOP_RESYNC (0x00) +#define LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +struct LeapioraidEventDataIrVol_t { + U16 VolDevHandle; + U8 ReasonCode; + U8 Reserved1; + U32 NewValue; + U32 PreviousValue; +}; + +#define LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) +struct LeapioraidEventDataIrPhyDisk_t { + U16 Reserved1; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; + U16 Reserved2; + U16 Slot; + U16 EnclosureHandle; + U32 NewValue; + U32 PreviousValue; +}; + +#define LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +struct LeapioraidEventIrCfgEle_t { + U16 ElementFlags; + U16 VolDevHandle; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT 
(0x0002) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +struct LeapioraidEventDataIrCfgChangeList_t { + U8 NumElements; + U8 Reserved1; + U8 Reserved2; + U8 ConfigNum; + U32 Flags; + struct LeapioraidEventIrCfgEle_t ConfigElement[]; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) +struct LeapioraidEventDataSasDiscovery_t { + U8 Flags; + U8 ReasonCode; + U8 PhysicalPort; + U8 Reserved1; + U32 DiscoveryStatus; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_RC_STARTED (0x01) + +struct LeapioraidEventDataSasBroadcastPrimitive_t { + U8 PhyNum; + U8 Port; + U8 PortWidth; + U8 Primitive; +}; + +#define LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) + +struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; + U8 LinkRate; + U8 PhyStatus; +}; + +struct LeapioraidEventDataSasTopoChangeList_t { + U16 EnclosureHandle; + U16 ExpanderDevHandle; + U8 NumPhys; + U8 Reserved1; + U16 Reserved2; + U8 NumEntries; + U8 StartPhyNum; + U8 ExpStatus; + U8 PhysicalPort; + struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY PHY[]; +}; + +#define LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define 
LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +struct LeapioraidEventDataSasEnclDevStatusChange_t { + U16 EnclosureHandle; + U8 ReasonCode; + U8 PhysicalPort; + U64 EnclosureLogicalID; + U16 NumSlots; + U16 StartSlot; + U32 PhyBits; +}; + +#define LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +struct LeapioraidEventDataSasDeviceDiscoveryError_t { + U16 DevHandle; + U8 ReasonCode; + U8 PhysicalPort; + U32 Reserved1[2]; + U64 SASAddress; + U32 Reserved2[2]; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + +struct LeapioraidEventAckReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Event; + U16 Reserved5; + U32 EventContext; +}; + +struct LeapioraidFWUploadReq_t { + U8 ImageType; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U32 Reserved5; + U32 Reserved6; + U32 Reserved7; + U32 ImageOffset; + U32 ImageSize; + union LEAPIORAID_IEEE_SGE_IO_UNION SGL; +}; + +struct LeapioraidFWUploadRep_t { + U8 ImageType; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Reserved5; + U16 IOCStatus; + U32 IOCLogInfo; + U32 ActualImageSize; +}; + +struct LeapioraidIoUnitControlReq_t { + U8 Operation; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U8 PhyNum; + U8 PrimFlags; + U32 Primitive; + U8 LookupMethod; + U8 Reserved5; + U16 SlotNumber; + U64 LookupAddress; + U32 IOCParameterValue; + U32 IOCParameterValue2; + U32 Reserved8; +}; + 
+#define LEAPIORAID_CTRL_OP_REMOVE_DEVICE (0x0D) + +struct LeapioraidIoUnitControlRep_t { + U8 Operation; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LEAPIORAID_RAID_ACTION_RATE_DATA { + U8 RateToChange; + U8 RateOrMode; + U16 DataScrubDuration; +}; + +struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; + U8 Flags; + U16 Reserved1; +}; + +struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; + U8 Flags; + U16 Reserved1; +}; + +struct LEAPIORAID_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; + U8 Reserved1; + U16 DevHandle; +}; + +struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; + U8 DeviceFirmwareUpdateModeTimeout; + U16 Reserved1; +}; + +union LEAPIORAID_RAID_ACTION_DATA { + U32 Word; + struct LEAPIORAID_RAID_ACTION_RATE_DATA Rates; + struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + struct LEAPIORAID_RAID_ACTION_HOT_SPARE HotSpare; + struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +}; + +struct LeapioraidRaidActionReq_t { + U8 Action; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 VolDevHandle; + U8 PhysDiskNum; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U32 Reserved3; + union LEAPIORAID_RAID_ACTION_DATA ActionDataWord; + struct LEAPIORAID_SGE_SIMPLE_UNION ActionDataSGE; +}; + +struct LEAPIORAID_RAID_VOL_INDICATOR { + U64 TotalBlocks; + U64 BlocksRemaining; + U32 Flags; + U32 ElapsedSeconds; +}; + +struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; + U8 Reserved1; + U16 Reserved2; + U32 GenericAttributes; + U32 OEMSpecificAttributes; + U32 Reserved3; + U32 Reserved4; +}; + +union LEAPIORAID_RAID_ACTION_REPLY_DATA { + U32 Word[6]; + struct LEAPIORAID_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + 
U8 PhysDiskNum; + struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +}; + +struct LeapioraidRaidActionRep_t { + U8 Action; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 VolDevHandle; + U8 PhysDiskNum; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved2; + U16 Reserved3; + U16 IOCStatus; + U32 IOCLogInfo; + union LEAPIORAID_RAID_ACTION_REPLY_DATA ActionData; +}; + +#define LEAPIORAID_SAS_DEVICE_INFO_SEP (0x00004000) +#define LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST (0x00000008) +#define LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +struct LeapioraidSmpPassthroughReq_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 ChainOffset; + U8 Function; + U16 RequestDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U32 Reserved2; + U64 SASAddress; + U32 Reserved3; + U32 Reserved4; + union LEAPIORAID_SIMPLE_SGE_UNION SGL; +}; + +struct LeapioraidSmpPassthroughRep_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 MsgLength; + U8 Function; + U16 ResponseDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 SASStatus; + U16 IOCStatus; + U32 IOCLogInfo; + U32 Reserved3; + U8 ResponseData[4]; +}; + +struct 
LeapioraidSasIoUnitControlReq_t { + U8 Operation; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U8 PhyNum; + U8 PrimFlags; + U32 Primitive; + U8 LookupMethod; + U8 Reserved5; + U16 SlotNumber; + U64 LookupAddress; + U32 IOCParameterValue; + U32 Reserved7; + U32 Reserved8; +}; + +#define LEAPIORAID_SAS_OP_PHY_LINK_RESET (0x06) +#define LEAPIORAID_SAS_OP_PHY_HARD_RESET (0x07) +#define LEAPIORAID_SAS_OP_REMOVE_DEVICE (0x0D) +struct LeapioraidSasIoUnitControlRep_t { + U8 Operation; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; +}; +#endif \ No newline at end of file diff --git a/drivers/scsi/leapioraid/leapioraid_app.c b/drivers/scsi/leapioraid/leapioraid_app.c new file mode 100644 index 000000000000..9d699721d1be --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_app.c @@ -0,0 +1,2226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" + +#ifdef __KERNEL__ +#include +#endif +#include "leapioraid_func.h" + +#define LEAPIORAID_DEV_NAME "leapioraid_ctl" + +#define LEAPIORAID_MAGIC_NUMBER 'L' +#define LEAPIORAID_IOCTL_DEFAULT_TIMEOUT (10) + +#define LEAPIORAID_IOCINFO \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 17, struct leapio_ioctl_iocinfo) +#define LEAPIORAID_COMMAND \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command) +#ifdef CONFIG_COMPAT +#define LEAPIORAID_COMMAND32 \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command32) +#endif +#define LEAPIORAID_EVENTQUERY \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 21, struct 
leapio_ioctl_eventquery) +#define LEAPIORAID_EVENTENABLE \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 22, struct leapio_ioctl_eventenable) +#define LEAPIORAID_EVENTREPORT \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 23, struct leapio_ioctl_eventreport) +#define LEAPIORAID_HARDRESET \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 24, struct leapio_ioctl_diag_reset) +#define LEAPIORAID_BTDHMAPPING \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 31, struct leapio_ioctl_btdh_mapping) + +struct leapio_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +struct leapio_ioctl_diag_reset { + struct leapio_ioctl_header hdr; +}; + +struct leapio_ioctl_pci_info { + union { + struct { + uint32_t device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + +struct leapio_ioctl_iocinfo { + struct leapio_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t driver_version[32]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct leapio_ioctl_pci_info pci_information; +}; + +#define LEAPIORAID_CTL_EVENT_LOG_SIZE (200) +struct leapio_ioctl_eventquery { + struct leapio_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +struct leapio_ioctl_eventenable { + struct leapio_ioctl_header hdr; + uint32_t event_types[4]; +}; + +#define LEAPIORAID_EVENT_DATA_SIZE (192) +struct LEAPIORAID_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[LEAPIORAID_EVENT_DATA_SIZE]; +}; + +struct leapio_ioctl_eventreport { + struct leapio_ioctl_header hdr; + struct LEAPIORAID_IOCTL_EVENTS event_data[]; +}; + +struct leapio_ioctl_command { + struct leapio_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void 
__user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; + +#ifdef CONFIG_COMPAT +struct leapio_ioctl_command32 { + struct leapio_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; +#endif + +struct leapio_ioctl_btdh_mapping { + struct leapio_ioctl_header hdr; + uint32_t id; + uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + +static struct fasync_struct *leapioraid_async_queue; +static DECLARE_WAIT_QUEUE_HEAD(leapioraid_ctl_poll_wait); + +enum leapioraid_block_state { + NON_BLOCKING, + BLOCKING, +}; + +static void +leapioraid_ctl_display_some_debug( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + char *calling_function_name, + struct LeapioraidDefaultRep_t *mpi_reply) +{ + struct LeapioraidCfgReq_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_IOCTL)) + return; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case LEAPIORAID_FUNC_SCSI_IO_REQUEST: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + desc = "ioc_init"; + break; + case LEAPIORAID_FUNC_IOC_FACTS: + desc = "ioc_facts"; + break; + case LEAPIORAID_FUNC_CONFIG: + { + struct LeapioraidCfgReq_t *config_request = + (struct LeapioraidCfgReq_t *) mpi_request; + 
snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + LEAPIORAID_CONFIG_PAGETYPE_MASK), + config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_PORT_FACTS: + desc = "port_facts"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + desc = "port_enable"; + break; + case LEAPIORAID_FUNC_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case LEAPIORAID_FUNC_FW_DOWNLOAD: + desc = "fw_download"; + break; + case LEAPIORAID_FUNC_FW_UPLOAD: + desc = "fw_upload"; + break; + case LEAPIORAID_FUNC_RAID_ACTION: + desc = "raid_action"; + break; + case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case LEAPIORAID_FUNC_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + } + if (!desc) + return; + pr_info("%s %s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) { + struct LeapioraidSCSIIORep_t *scsi_reply = + (struct LeapioraidSCSIIORep_t *) mpi_reply; + struct leapioraid_sas_device *sas_device = NULL; + + sas_device = leapioraid_get_sdev_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); 
+	if (sas_device) {
+		pr_info("%s \tsas_address(0x%016llx), phy(%d)\n",
+			ioc->name, (unsigned long long)
+			sas_device->sas_address, sas_device->phy);
+		if (sas_device->enclosure_handle != 0)
+			pr_info(
+				"%s \tenclosure_logical_id(0x%016llx), slot(%d)\n",
+				ioc->name, (unsigned long long)
+				sas_device->enclosure_logical_id,
+				sas_device->slot);
+		leapioraid_sas_device_put(sas_device);
+	}
+	if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+		pr_info(
+			"%s \tscsi_state(0x%02x), scsi_status (0x%02x)\n",
+			ioc->name, scsi_reply->SCSIState, scsi_reply->SCSIStatus);
+	}
+}
+
+/*
+ * leapioraid_ctl_done - completion routine for the internal ctl (ioctl)
+ * command.  Copies the reply frame (and auto-sense data for SCSI IO /
+ * RAID passthrough replies) into ioc->ctl_cmds, then wakes the waiter.
+ * Returns 1 so the caller frees the reply frame.
+ */
+u8
+leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
+		    u32 reply)
+{
+	struct LeapioraidDefaultRep_t *mpi_reply;
+	struct LeapioraidSCSIIORep_t *scsiio_reply;
+	const void *sense_data;
+	u32 sz;
+
+	/* Ignore completions that do not belong to the pending ctl command. */
+	if (ioc->ctl_cmds.status == LEAPIORAID_CMD_NOT_USED)
+		return 1;
+	if (ioc->ctl_cmds.smid != smid)
+		return 1;
+	ioc->ctl_cmds.status |= LEAPIORAID_CMD_COMPLETE;
+	mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
+	if (mpi_reply) {
+		/* MsgLength is in 32-bit dwords, hence the * 4. */
+		memcpy(ioc->ctl_cmds.reply, mpi_reply,
+		       mpi_reply->MsgLength * 4);
+		ioc->ctl_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
+		if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
+		    mpi_reply->Function ==
+		    LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) {
+			scsiio_reply = (struct LeapioraidSCSIIORep_t *) mpi_reply;
+			/* Stash auto-sense so the ioctl path can copy it out. */
+			if (scsiio_reply->SCSIState &
+			    LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
+				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+					   le32_to_cpu(scsiio_reply->SenseCount));
+				sense_data =
+				    leapioraid_base_get_sense_buffer(ioc, smid);
+				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+			}
+		}
+	}
+	leapioraid_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+	ioc->ctl_cmds.status &= ~LEAPIORAID_CMD_PENDING;
+	complete(&ioc->ctl_cmds.done);
+	return 1;
+}
+
+/*
+ * leapioraid_ctl_check_event_type - non-zero when @event is enabled in the
+ * user-programmed event mask (see leapioraid_ctl_eventenable).
+ */
+static int leapioraid_ctl_check_event_type(
+	struct LEAPIORAID_ADAPTER *ioc, u16 event)
+{
+	u16 i;
+	u32 desired_event;
+
+	if (event >= 128 || !event || !ioc->event_log)
+		return 0;
+	desired_event = (1 << (event % 32));
+	/* NOTE(review): 1 << (event % 32) can never be zero, so this
+	 * fallback looks unreachable — confirm intent.
+	 */
+	if (!desired_event)
+		desired_event = 1;
+	i = event / 32;
+	return desired_event & ioc->event_type[i];
+}
+
+/*
+ * leapioraid_ctl_add_to_event_log - record a firmware event notification
+ * in the circular user event log and, when appropriate, wake pollers and
+ * send SIGIO to async subscribers.
+ */
+void
+leapioraid_ctl_add_to_event_log(
+	struct LEAPIORAID_ADAPTER *ioc,
+	struct LeapioraidEventNotificationRep_t *mpi_reply)
+{
+	struct LEAPIORAID_IOCTL_EVENTS *event_log;
+	u16 event;
+	int i;
+	u32 sz, event_data_sz;
+	u8 send_aen = 0;
+
+	if (!ioc->event_log)
+		return;
+	event = le16_to_cpu(mpi_reply->Event);
+	if (leapioraid_ctl_check_event_type(ioc, event)) {
+		/* Circular buffer: overwrite the oldest entry. */
+		i = ioc->event_context % LEAPIORAID_CTL_EVENT_LOG_SIZE;
+		event_log = ioc->event_log;
+		event_log[i].event = event;
+		event_log[i].context = ioc->event_context++;
+		/* EventDataLength is in dwords; clamp to the slot size. */
+		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+		sz = min_t(u32, event_data_sz, LEAPIORAID_EVENT_DATA_SIZE);
+		memset(event_log[i].data, 0, LEAPIORAID_EVENT_DATA_SIZE);
+		memcpy(event_log[i].data, mpi_reply->EventData, sz);
+		send_aen = 1;
+	}
+	if (event == LEAPIORAID_EVENT_LOG_ENTRY_ADDED ||
+	    (send_aen && !ioc->aen_event_read_flag)) {
+		ioc->aen_event_read_flag = 1;
+		wake_up_interruptible(&leapioraid_ctl_poll_wait);
+		if (leapioraid_async_queue)
+			kill_fasync(&leapioraid_async_queue, SIGIO, POLL_IN);
+	}
+}
+
+/*
+ * leapioraid_ctl_event_callback - interrupt-path hook for event
+ * notification replies; forwards them to the event log.  Returns 1 so
+ * the caller frees the reply frame.
+ */
+u8
+leapioraid_ctl_event_callback(
+	struct LEAPIORAID_ADAPTER *ioc, u8 msix_index,
+	u32 reply)
+{
+	struct LeapioraidEventNotificationRep_t *mpi_reply;
+
+	mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
+	if (mpi_reply)
+		leapioraid_ctl_add_to_event_log(ioc, mpi_reply);
+	return 1;
+}
+
+/*
+ * leapioraid_ctl_verify_adapter - map an ioctl-supplied adapter number to
+ * its LEAPIORAID_ADAPTER.  Returns @ioc_number and sets *@iocpp on
+ * success, or -1 with *@iocpp = NULL when no such adapter exists.
+ */
+static int
+leapioraid_ctl_verify_adapter(
+	int ioc_number, struct LEAPIORAID_ADAPTER **iocpp)
+{
+	struct LEAPIORAID_ADAPTER *ioc;
+
+	spin_lock(&leapioraid_gioc_lock);
+	list_for_each_entry(ioc, &leapioraid_ioc_list, list) {
+		if (ioc->id != ioc_number)
+			continue;
+		spin_unlock(&leapioraid_gioc_lock);
+		*iocpp = ioc;
+		return ioc_number;
+	}
+	spin_unlock(&leapioraid_gioc_lock);
+	*iocpp = NULL;
+	return -1;
+}
+
+void
+/*
+ * leapioraid_ctl_clear_outstanding_ioctls - abort the in-flight ctl
+ * command during controller reset: free its smid and complete the waiter
+ * with LEAPIORAID_CMD_RESET marked in the status.
+ */
+leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc)
+{
+	if (ioc->ctl_cmds.status & LEAPIORAID_CMD_PENDING) {
+		ioc->ctl_cmds.status |= LEAPIORAID_CMD_RESET;
+		leapioraid_base_free_smid(ioc, ioc->ctl_cmds.smid);
+		complete(&ioc->ctl_cmds.done);
+	}
+}
+
+/*
+ * leapioraid_ctl_reset_handler - host reset phase notification; pending
+ * ioctls are cancelled in the AFTER_RESET phase.
+ */
+void
+leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase)
+{
+	switch (reset_phase) {
+	case LEAPIORAID_IOC_PRE_RESET_PHASE:
+		dtmprintk(ioc, pr_info(
+			"%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", ioc->name,
+			__func__));
+		break;
+	case LEAPIORAID_IOC_AFTER_RESET_PHASE:
+		dtmprintk(ioc, pr_info(
+			"%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", ioc->name,
+			__func__));
+		leapioraid_ctl_clear_outstanding_ioctls(ioc);
+		break;
+	case LEAPIORAID_IOC_DONE_RESET_PHASE:
+		dtmprintk(ioc, pr_info(
+			"%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", ioc->name,
+			__func__));
+		break;
+	}
+}
+
+/* fasync registration for SIGIO delivery on new events. */
+static int
+leapioraid_ctl_fasync(int fd, struct file *filep, int mode)
+{
+	return fasync_helper(fd, filep, mode, &leapioraid_async_queue);
+}
+
+/* release: drop this file from the async notification queue. */
+int
+leapioraid_ctl_release(struct inode *inode, struct file *filep)
+{
+	return fasync_helper(-1, filep, 0, &leapioraid_async_queue);
+}
+
+/*
+ * leapioraid_ctl_poll - poll support for the ctl char device; readable
+ * when any adapter has an unread AEN event.
+ */
+static unsigned int
+leapioraid_ctl_poll(struct file *filep, poll_table *wait)
+{
+	struct LEAPIORAID_ADAPTER *ioc;
+
+	poll_wait(filep, &leapioraid_ctl_poll_wait, wait);
+	spin_lock(&leapioraid_gioc_lock);
+	list_for_each_entry(ioc, &leapioraid_ioc_list, list) {
+		if (ioc->aen_event_read_flag) {
+			spin_unlock(&leapioraid_gioc_lock);
+			return POLLIN | POLLRDNORM;
+		}
+	}
+	spin_unlock(&leapioraid_gioc_lock);
+	return 0;
+}
+
+/*
+ * leapioraid_ctl_set_task_mid - for ABORT_TASK/QUERY_TASK requests coming
+ * from userspace, locate the active smid matching the target handle/LUN
+ * and patch it into tm_request->TaskMID.  Returns 0 on success, 1 when no
+ * active mid was found (a synthesized reply is copied to userspace).
+ */
+static int
+leapioraid_ctl_set_task_mid(struct LEAPIORAID_ADAPTER *ioc,
+			    struct leapio_ioctl_command *karg,
+			    struct LeapioraidSCSITmgReq_t *tm_request)
+{
+	u8 found = 0;
+	u16 smid;
+	u16 handle;
+	struct scsi_cmnd *scmd;
+	struct LEAPIORAID_DEVICE *priv_data;
+	struct LeapioraidSCSITmgRep_t *tm_reply;
+	u32 sz;
+	u32 lun;
+	char *desc = NULL;
+	struct leapioraid_scsiio_tracker *st = NULL;
+
+	if (tm_request->TaskType == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+		desc = "abort_task";
+	else if (tm_request->TaskType ==
+		 LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+		desc = "query_task";
+	else
+		return 0;
+	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+	handle = le16_to_cpu(tm_request->DevHandle);
+	/* Walk all outstanding smids looking for a command on this
+	 * handle/LUN; stop at the first match.
+	 */
+	for (smid = ioc->shost->can_queue; smid && !found; smid--) {
+		scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
+		if (scmd == NULL || scmd->device == NULL ||
+		    scmd->device->hostdata == NULL)
+			continue;
+		if (lun != scmd->device->lun)
+			continue;
+		priv_data = scmd->device->hostdata;
+		if (priv_data->sas_target == NULL)
+			continue;
+		if (priv_data->sas_target->handle != handle)
+			continue;
+		st = leapioraid_base_scsi_cmd_priv(scmd);
+		if ((!st) || (st->smid == 0))
+			continue;
+		/* NOTE(review): TaskMID is compared against a CPU-order smid
+		 * but stored with cpu_to_le16 — verify endianness handling on
+		 * big-endian hosts.
+		 */
+		if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
+			tm_request->TaskMID = cpu_to_le16(st->smid);
+			found = 1;
+		}
+	}
+	if (!found) {
+		dctlprintk(ioc, pr_info(
+			"%s %s: handle(0x%04x), lun(%d), no active mid!!\n",
+			ioc->name, desc,
+			le16_to_cpu(tm_request->DevHandle),
+			lun));
+		/* Fabricate a reply so userspace sees a well-formed TM
+		 * response even though nothing was sent to the firmware.
+		 */
+		tm_reply = ioc->ctl_cmds.reply;
+		tm_reply->DevHandle = tm_request->DevHandle;
+		tm_reply->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
+		tm_reply->TaskType = tm_request->TaskType;
+		tm_reply->MsgLength =
+		    sizeof(struct LeapioraidSCSITmgRep_t) / 4;
+		tm_reply->VP_ID = tm_request->VP_ID;
+		tm_reply->VF_ID = tm_request->VF_ID;
+		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+				 sz))
+			pr_err("failure at %s:%d/%s()!\n", __FILE__,
+			       __LINE__, __func__);
+		return 1;
+	}
+	dctlprintk(ioc, pr_info(
+		"%s %s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+		ioc->name, desc,
+		le16_to_cpu(tm_request->DevHandle), lun,
+		le16_to_cpu(tm_request->TaskMID)));
+	return 0;
+}
+
+/*
+ * leapioraid_ctl_do_command - execute a userspace-supplied MPI request
+ * frame: validates it, sets up optional DMA in/out buffers, dispatches by
+ * MPI function, waits for completion, then copies reply/data/sense back
+ * to userspace.  Issues a target or host reset on timeout.
+ */
+static long
+leapioraid_ctl_do_command(struct LEAPIORAID_ADAPTER *ioc,
+			  struct leapio_ioctl_command karg, void __user *mf)
+{
+	struct
 LeapioraidReqHeader_t *mpi_request = NULL, *request;
+	struct LeapioraidDefaultRep_t *mpi_reply;
+	u16 smid;
+	unsigned long timeout;
+	u8 issue_reset;
+	u32 sz, sz_arg;
+	void *psge;
+	void *data_out = NULL;
+	dma_addr_t data_out_dma = 0;
+	size_t data_out_sz = 0;
+	void *data_in = NULL;
+	dma_addr_t data_in_dma = 0;
+	size_t data_in_sz = 0;
+	long ret;
+	u16 device_handle = LEAPIORAID_INVALID_DEVICE_HANDLE;
+
+	issue_reset = 0;
+	/* Only one ctl command may be outstanding at a time. */
+	if (ioc->ctl_cmds.status != LEAPIORAID_CMD_NOT_USED) {
+		pr_err("%s %s: ctl_cmd in use\n",
+		       ioc->name, __func__);
+		ret = -EAGAIN;
+		goto out;
+	}
+	ret = leapioraid_wait_for_ioc_to_operational(ioc, 10);
+	if (ret)
+		goto out;
+	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+	if (!mpi_request) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* data_sge_offset is in dwords; reject frames larger than a request
+	 * slot.  NOTE(review): the UINT_MAX/4 overflow guard runs after the
+	 * (wrapping, unsigned) multiply in the first operand — consider
+	 * checking the bound first.
+	 */
+	if (karg.data_sge_offset * 4 > ioc->request_sz ||
+	    karg.data_sge_offset > (UINT_MAX / 4)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (copy_from_user(mpi_request, mf, karg.data_sge_offset * 4)) {
+		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+		       __func__);
+		ret = -EFAULT;
+		goto out;
+	}
+	/* Task management runs on a high-priority smid; everything else
+	 * borrows the reserved internal SCSI-IO slot.
+	 */
+	if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) {
+		smid = leapioraid_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+		if (!smid) {
+			pr_err(
+				"%s %s: failed obtaining a smid\n", ioc->name,
+				__func__);
+			ret = -EAGAIN;
+			goto out;
+		}
+	} else {
+		smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL;
+	}
+	ret = 0;
+	ioc->ctl_cmds.status = LEAPIORAID_CMD_PENDING;
+	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+	request = leapioraid_base_get_msg_frame(ioc, smid);
+	memset(request, 0, ioc->request_sz);
+	memcpy(request, mpi_request, karg.data_sge_offset * 4);
+	ioc->ctl_cmds.smid = smid;
+	data_out_sz = karg.data_out_size;
+	data_in_sz = karg.data_in_size;
+	/* Functions addressing a device must carry a valid handle in
+	 * FunctionDependent1.
+	 */
+	if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
+	    mpi_request->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH
+	    || mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT
+	    || mpi_request->Function ==
 LEAPIORAID_FUNC_SATA_PASSTHROUGH) {
+		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+		if (!device_handle || (device_handle > ioc->facts.MaxDevHandle)) {
+			ret = -EINVAL;
+			leapioraid_base_free_smid(ioc, smid);
+			goto out;
+		}
+	}
+	/* NOTE(review): dma_alloc_coherent with GFP_ATOMIC in sleepable
+	 * ioctl context — GFP_KERNEL would be the conventional choice.
+	 */
+	if (data_out_sz) {
+		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+					      &data_out_dma, GFP_ATOMIC);
+		if (!data_out) {
+			ret = -ENOMEM;
+			leapioraid_base_free_smid(ioc, smid);
+			goto out;
+		}
+		if (copy_from_user(data_out, karg.data_out_buf_ptr,
+				   data_out_sz)) {
+			pr_err("failure at %s:%d/%s()!\n", __FILE__,
+			       __LINE__, __func__);
+			ret = -EFAULT;
+			leapioraid_base_free_smid(ioc, smid);
+			goto out;
+		}
+	}
+	if (data_in_sz) {
+		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+					     &data_in_dma, GFP_ATOMIC);
+		if (!data_in) {
+			ret = -ENOMEM;
+			leapioraid_base_free_smid(ioc, smid);
+			goto out;
+		}
+	}
+	psge = (void *)request + (karg.data_sge_offset * 4);
+	leapioraid_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+	init_completion(&ioc->ctl_cmds.done);
+	/* Dispatch by MPI function; each arm builds the SGL and posts the
+	 * request with the appropriate doorbell helper.
+	 */
+	switch (mpi_request->Function) {
+	case LEAPIORAID_FUNC_SCSI_IO_REQUEST:
+	case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH:
+	{
+		struct LeapioSCSIIOReq_t *scsiio_request =
+		    (struct LeapioSCSIIOReq_t *) request;
+		scsiio_request->SenseBufferLength =
+		    SCSI_SENSE_BUFFERSIZE;
+		scsiio_request->SenseBufferLowAddress =
+		    leapioraid_base_get_sense_buffer_dma(ioc, smid);
+		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+			      data_in_dma, data_in_sz);
+		if (test_bit
+		    (device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc,
+				  pr_info(
+					  "%s handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					  ioc->name, device_handle));
+			leapioraid_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
+		if (mpi_request->Function ==
+		    LEAPIORAID_FUNC_SCSI_IO_REQUEST)
+			ioc->put_smid_scsi_io(ioc, smid, device_handle);
+		else
+			ioc->put_smid_default(ioc, smid);
+		break;
+	}
+	case
 LEAPIORAID_FUNC_SCSI_TASK_MGMT:
+	{
+		struct LeapioraidSCSITmgReq_t *tm_request =
+		    (struct LeapioraidSCSITmgReq_t *) request;
+		dtmprintk(ioc,
+			  pr_info("%s TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+				  ioc->name,
+				  le16_to_cpu(tm_request->DevHandle),
+				  tm_request->TaskType));
+		ioc->got_task_abort_from_ioctl = 1;
+		/* Abort/query need the target smid resolved first. */
+		if (tm_request->TaskType ==
+		    LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+		    tm_request->TaskType ==
+		    LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+			if (leapioraid_ctl_set_task_mid(ioc, &karg, tm_request)) {
+				leapioraid_base_free_smid(ioc, smid);
+				ioc->got_task_abort_from_ioctl = 0;
+				goto out;
+			}
+		}
+		ioc->got_task_abort_from_ioctl = 0;
+		if (test_bit
+		    (device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc,
+				  pr_info(
+					  "%s handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					  ioc->name, device_handle));
+			leapioraid_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
+		leapioraid_scsihost_set_tm_flag(ioc,
+						le16_to_cpu(tm_request->DevHandle));
+		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+				  data_in_dma, data_in_sz);
+		ioc->put_smid_hi_priority(ioc, smid, 0);
+		break;
+	}
+	case LEAPIORAID_FUNC_SMP_PASSTHROUGH:
+	{
+		struct LeapioraidSmpPassthroughReq_t *smp_request =
+		    (struct LeapioraidSmpPassthroughReq_t *) mpi_request;
+		u8 *data;
+
+		if (!ioc->multipath_on_hba)
+			smp_request->PhysicalPort = 0xFF;
+		/* Immediate-mode SMP carries the frame in the SGL field. */
+		if (smp_request->PassthroughFlags &
+		    0x80)
+			data = (u8 *) &smp_request->SGL;
+		else {
+			if (unlikely(data_out == NULL)) {
+				pr_err(
+					"failure at %s:%d/%s()!\n",
+					__FILE__, __LINE__, __func__);
+				leapioraid_base_free_smid(ioc, smid);
+				ret = -EINVAL;
+				goto out;
+			}
+			data = data_out;
+		}
+		/* SMP PHY CONTROL (0x91) link/hard reset: suppress loginfo
+		 * noise while the link bounces.
+		 */
+		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+			ioc->ioc_link_reset_in_progress = 1;
+			ioc->ignore_loginfos = 1;
+		}
+		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+			      data_in_dma, data_in_sz);
+		ioc->put_smid_default(ioc, smid);
+		break;
+	}
+	case LEAPIORAID_FUNC_SATA_PASSTHROUGH:
+
 {
+		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+			      data_in_dma, data_in_sz);
+		if (test_bit
+		    (device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc,
+				  pr_info(
+					  "%s handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					  ioc->name, device_handle));
+			leapioraid_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
+		ioc->put_smid_default(ioc, smid);
+		break;
+	}
+	case LEAPIORAID_FUNC_FW_DOWNLOAD:
+	case LEAPIORAID_FUNC_FW_UPLOAD:
+	{
+		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+			      data_in_dma, data_in_sz);
+		ioc->put_smid_default(ioc, smid);
+		break;
+	}
+	case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL:
+	{
+		struct LeapioraidSasIoUnitControlReq_t *sasiounit_request =
+		    (struct LeapioraidSasIoUnitControlReq_t *) mpi_request;
+		/* PHY resets also quiesce loginfo reporting. */
+		if (sasiounit_request->Operation ==
+		    LEAPIORAID_SAS_OP_PHY_HARD_RESET
+		    || sasiounit_request->Operation ==
+		    LEAPIORAID_SAS_OP_PHY_LINK_RESET) {
+			ioc->ioc_link_reset_in_progress = 1;
+			ioc->ignore_loginfos = 1;
+		}
+	}
+		fallthrough;
+	default:
+		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+				  data_in_dma, data_in_sz);
+		ioc->put_smid_default(ioc, smid);
+		break;
+	}
+	/* Enforce the minimum ioctl timeout. */
+	timeout = karg.timeout;
+	if (timeout < LEAPIORAID_IOCTL_DEFAULT_TIMEOUT)
+		timeout = LEAPIORAID_IOCTL_DEFAULT_TIMEOUT;
+	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout * HZ);
+	if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) {
+		struct LeapioraidSCSITmgReq_t *tm_request =
+		    (struct LeapioraidSCSITmgReq_t *) mpi_request;
+		leapioraid_scsihost_clear_tm_flag(ioc,
+						  le16_to_cpu(tm_request->DevHandle));
+	} else if ((mpi_request->Function == LEAPIORAID_FUNC_SMP_PASSTHROUGH
+		    || mpi_request->Function ==
+		    LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL)
+		   && ioc->ioc_link_reset_in_progress) {
+		ioc->ioc_link_reset_in_progress = 0;
+		ioc->ignore_loginfos = 0;
+	}
+	if (!(ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
+		leapioraid_check_cmd_timeout(ioc,
+					     ioc->ctl_cmds.status, mpi_request,
+					     karg.data_sge_offset,
 issue_reset);
+		goto issue_host_reset;
+	}
+	mpi_reply = ioc->ctl_cmds.reply;
+	if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT &&
+	    (ioc->logging_level & LEAPIORAID_DEBUG_TM)) {
+		struct LeapioraidSCSITmgRep_t *tm_reply =
+		    (struct LeapioraidSCSITmgRep_t *) mpi_reply;
+		pr_info(
+			"%s TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+			ioc->name,
+			le16_to_cpu(tm_reply->IOCStatus),
+			le32_to_cpu(tm_reply->IOCLogInfo),
+			le32_to_cpu(tm_reply->TerminationCount));
+	}
+	/* Copy read data, reply frame and sense data back to userspace. */
+	if (data_in_sz) {
+		if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) {
+			pr_err("failure at %s:%d/%s()!\n", __FILE__,
+			       __LINE__, __func__);
+			ret = -ENODATA;
+			goto out;
+		}
+	}
+	if (karg.max_reply_bytes) {
+		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+				 sz)) {
+			pr_err("failure at %s:%d/%s()!\n", __FILE__,
+			       __LINE__, __func__);
+			ret = -ENODATA;
+			goto out;
+		}
+	}
+	if (karg.max_sense_bytes && (mpi_request->Function ==
+				     LEAPIORAID_FUNC_SCSI_IO_REQUEST
+				     || mpi_request->Function ==
+				     LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH)) {
+		if (karg.sense_data_ptr == NULL) {
+			pr_err(
+				"%s Response buffer provided by application is NULL; Response data will not be returned.\n",
+				ioc->name);
+			goto out;
+		}
+		sz_arg = SCSI_SENSE_BUFFERSIZE;
+		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
+		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) {
+			pr_err("failure at %s:%d/%s()!\n",
+			       __FILE__, __LINE__, __func__);
+			ret = -ENODATA;
+			goto out;
+		}
+	}
+issue_host_reset:
+	if (issue_reset) {
+		ret = -ENODATA;
+		/* Device-directed commands get a target reset first; anything
+		 * else escalates straight to a host reset.
+		 */
+		if ((mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST
+		     || mpi_request->Function ==
+		     LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH
+		     || mpi_request->Function ==
+		     LEAPIORAID_FUNC_SATA_PASSTHROUGH)) {
+			pr_err(
+				"%s issue target reset: handle = (0x%04x)\n",
+				ioc->name,
+				le16_to_cpu(mpi_request->FunctionDependent1));
+			leapioraid_halt_firmware(ioc, 0);
+
leapioraid_scsihost_issue_locked_tm(ioc, + le16_to_cpu + (mpi_request->FunctionDependent1), + 0, 0, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + smid, 30, + LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + } else + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + } +out: + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + kfree(mpi_request); + ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED; + return ret; +} + +static long +leapioraid_ctl_getiocinfo( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_iocinfo karg; + u8 revision; + + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + memset(&karg, 0, sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + karg.hw_rev = revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strscpy(karg.driver_version, ioc->driver_name, sizeof(karg.driver_version)); + strcat(karg.driver_version, "-"); + karg.adapter_type = 0x06; + strcat(karg.driver_version, LEAPIORAID_DRIVER_VERSION); + karg.adapter_type = 0x07; + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +static long +leapioraid_ctl_eventquery( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct 
 leapio_ioctl_eventquery karg;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
+				__func__));
+	karg.event_entries = LEAPIORAID_CTL_EVENT_LOG_SIZE;
+	memcpy(karg.event_types, ioc->event_type,
+	       LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+	if (copy_to_user(arg, &karg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * leapioraid_ctl_eventenable - LEAPIORAID_EVENTENABLE ioctl handler:
+ * install the userspace event-type mask and lazily allocate the circular
+ * event log on first use.
+ */
+static long
+leapioraid_ctl_eventenable(
+	struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
+{
+	struct leapio_ioctl_eventenable karg;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
+				__func__));
+	memcpy(ioc->event_type, karg.event_types,
+	       LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+	leapioraid_base_validate_event_type(ioc, ioc->event_type);
+	/* Event log is allocated once and kept for the adapter lifetime. */
+	if (ioc->event_log)
+		return 0;
+	ioc->event_context = 0;
+	ioc->aen_event_read_flag = 0;
+	ioc->event_log = kcalloc(LEAPIORAID_CTL_EVENT_LOG_SIZE,
+				 sizeof(struct LEAPIORAID_IOCTL_EVENTS),
+				 GFP_KERNEL);
+	if (!ioc->event_log) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * leapioraid_ctl_eventreport - LEAPIORAID_EVENTREPORT ioctl handler: copy
+ * as many logged events as fit in the caller's buffer and clear the
+ * unread-AEN flag.
+ */
+static long
+leapioraid_ctl_eventreport(
+	struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
+{
+	struct leapio_ioctl_eventreport karg;
+	u32 number_bytes, max_events, max;
+	struct leapio_ioctl_eventreport __user *uarg = arg;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
+				__func__));
+	/* NOTE(review): if max_data_size < sizeof(header) this unsigned
+	 * subtraction wraps; max_events then becomes huge but is clamped by
+	 * the min_t below — verify max_data_size is validated by callers.
+	 */
+	number_bytes = karg.hdr.max_data_size -
+	    sizeof(struct leapio_ioctl_header);
+	max_events = number_bytes / sizeof(struct
 LEAPIORAID_IOCTL_EVENTS);
+	max = min_t(u32, LEAPIORAID_CTL_EVENT_LOG_SIZE, max_events);
+	if (!max || !ioc->event_log)
+		return -ENODATA;
+	number_bytes = max * sizeof(struct LEAPIORAID_IOCTL_EVENTS);
+	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	/* Events handed to userspace; re-arm poll/SIGIO notification. */
+	ioc->aen_event_read_flag = 0;
+	return 0;
+}
+
+/*
+ * leapioraid_ctl_do_reset - LEAPIORAID_HARDRESET ioctl handler: perform a
+ * user-requested hard reset with host I/O blocked for the duration.
+ */
+static long
+leapioraid_ctl_do_reset(
+	struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
+{
+	struct leapio_ioctl_diag_reset karg;
+	int retval;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	/* Refuse while the adapter is already recovering or going away. */
+	if (ioc->shost_recovery ||
+	    ioc->pci_error_recovery || ioc->is_driver_loading ||
+	    ioc->remove_host)
+		return -EAGAIN;
+	dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
+				__func__));
+	ioc->reset_from_user = 1;
+	scsi_block_requests(ioc->shost);
+	retval = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+	scsi_unblock_requests(ioc->shost);
+	pr_info("%s ioctl: host reset: %s\n",
+		ioc->name, ((!retval) ?
"SUCCESS" : "FAILED")); + return 0; +} + +static int +leapioraid_ctl_btdh_search_sas_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_ctl_btdh_search_raid_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +static long +leapioraid_ctl_btdh_mapping( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + 
 dctlprintk(ioc, pr_info("%s %s\n", ioc->name,
+				__func__));
+	rc = leapioraid_ctl_btdh_search_sas_device(ioc, &karg);
+	if (!rc)
+		leapioraid_ctl_btdh_search_raid_device(ioc, &karg);
+	if (copy_to_user(arg, &karg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * leapioraid_ctl_compat_command - 32-bit compat shim: widen the 32-bit
+ * ioctl command structure (compat_ptr on the user pointers) and forward
+ * to leapioraid_ctl_do_command.
+ */
+static long
+leapioraid_ctl_compat_command(
+	struct LEAPIORAID_ADAPTER *ioc, unsigned int cmd,
+	void __user *arg)
+{
+	struct leapio_ioctl_command32 karg32;
+	struct leapio_ioctl_command32 __user *uarg;
+	struct leapio_ioctl_command karg;
+
+	if (_IOC_SIZE(cmd) != sizeof(struct leapio_ioctl_command32))
+		return -EINVAL;
+	uarg = (struct leapio_ioctl_command32 __user *)arg;
+	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	memset(&karg, 0, sizeof(struct leapio_ioctl_command));
+	karg.hdr.ioc_number = karg32.hdr.ioc_number;
+	karg.hdr.port_number = karg32.hdr.port_number;
+	karg.hdr.max_data_size = karg32.hdr.max_data_size;
+	karg.timeout = karg32.timeout;
+	karg.max_reply_bytes = karg32.max_reply_bytes;
+	karg.data_in_size = karg32.data_in_size;
+	karg.data_out_size = karg32.data_out_size;
+	karg.max_sense_bytes = karg32.max_sense_bytes;
+	karg.data_sge_offset = karg32.data_sge_offset;
+	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+	return leapioraid_ctl_do_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/*
+ * leapioraid_ctl_ioctl_main - common ioctl entry: resolve the adapter,
+ * serialize against PCI access and other ctl commands (non-blocking when
+ * the file is opened O_NONBLOCK), then dispatch by ioctl opcode.
+ */
+static long
+leapioraid_ctl_ioctl_main(
+	struct file *file, unsigned int cmd, void __user *arg,
+	u8 compat)
+{
+	struct LEAPIORAID_ADAPTER *ioc;
+	struct leapio_ioctl_header ioctl_header;
+	enum leapioraid_block_state state;
+	long ret = -ENOIOCTLCMD;
+
+	if (copy_from_user(&ioctl_header, (char
 __user *)arg,
+			   sizeof(struct leapio_ioctl_header))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		       __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	if (leapioraid_ctl_verify_adapter(ioctl_header.ioc_number,
+					  &ioc) == -1 || !ioc)
+		return -ENODEV;
+	mutex_lock(&ioc->pci_access_mutex);
+	/* Reject while the adapter is resetting, loading or being removed. */
+	if (ioc->shost_recovery ||
+	    ioc->pci_error_recovery || ioc->is_driver_loading ||
+	    ioc->remove_host) {
+		ret = -EAGAIN;
+		goto unlock_pci_access;
+	}
+	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+	if (state == NON_BLOCKING) {
+		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
+			ret = -EAGAIN;
+			goto unlock_pci_access;
+		}
+	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+		ret = -ERESTARTSYS;
+		goto unlock_pci_access;
+	}
+	switch (cmd) {
+	case LEAPIORAID_IOCINFO:
+		if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_iocinfo))
+			ret = leapioraid_ctl_getiocinfo(ioc, arg);
+		break;
+#ifdef CONFIG_COMPAT
+	case LEAPIORAID_COMMAND32:
+#endif
+	case LEAPIORAID_COMMAND:
+	{
+		struct leapio_ioctl_command __user *uarg;
+		struct leapio_ioctl_command karg;
+
+#ifdef CONFIG_COMPAT
+		if (compat) {
+			ret =
+			    leapioraid_ctl_compat_command(ioc, cmd, arg);
+			break;
+		}
+#endif
+		if (copy_from_user(&karg, arg, sizeof(karg))) {
+			pr_err("failure at %s:%d/%s()!\n",
+			       __FILE__, __LINE__, __func__);
+			ret = -EFAULT;
+			break;
+		}
+		/* Header ioc_number must match the adapter we resolved. */
+		if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
+			ret = -EINVAL;
+			break;
+		}
+		if (_IOC_SIZE(cmd) ==
+		    sizeof(struct leapio_ioctl_command)) {
+			uarg = arg;
+			ret =
+			    leapioraid_ctl_do_command(ioc, karg,
+						      &uarg->mf);
+		}
+		break;
+	}
+	case LEAPIORAID_EVENTQUERY:
+		if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventquery))
+			ret = leapioraid_ctl_eventquery(ioc, arg);
+		break;
+	case LEAPIORAID_EVENTENABLE:
+		if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventenable))
+			ret = leapioraid_ctl_eventenable(ioc, arg);
+		break;
+	case LEAPIORAID_EVENTREPORT:
+		ret = leapioraid_ctl_eventreport(ioc, arg);
+		break;
+	case LEAPIORAID_HARDRESET:
+
 if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_diag_reset))
+			ret = leapioraid_ctl_do_reset(ioc, arg);
+		break;
+	case LEAPIORAID_BTDHMAPPING:
+		if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_btdh_mapping))
+			ret = leapioraid_ctl_btdh_mapping(ioc, arg);
+		break;
+	default:
+		dctlprintk(ioc, pr_err(
+			"%s unsupported ioctl opcode(0x%08x)\n",
+			ioc->name, cmd));
+		break;
+	}
+	mutex_unlock(&ioc->ctl_cmds.mutex);
+unlock_pci_access:
+	mutex_unlock(&ioc->pci_access_mutex);
+	return ret;
+}
+
+/* Native unlocked_ioctl entry point. */
+static long
+leapioraid_ctl_ioctl(
+	struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat_ioctl entry point. */
+static long
+leapioraid_ctl_ioctl_compat(
+	struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+	return ret;
+}
+#endif
+
+/* sysfs: firmware version as aa.bb.cc.dd, decoded from FWVersion.Word. */
+static ssize_t
+version_fw_show(
+	struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+			(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+			(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+			(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+			ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR_RO(version_fw);
+
+/* sysfs: BIOS version from BIOS page 3, same aa.bb.cc.dd decoding. */
+static ssize_t
+version_bios_show(
+	struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+	u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+			(version & 0xFF000000) >> 24,
+			(version & 0x00FF0000) >> 16,
+			(version & 0x0000FF00) >> 8, version & 0x000000FF);
+}
+static DEVICE_ATTR_RO(version_bios);
+
+/* sysfs: MPI message/header version reported by the firmware. */
+static ssize_t
+version_leapioraid_show(struct
 device *cdev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+			ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR_RO(version_leapioraid);
+
+/* sysfs: product (chip) name from manufacturing page 0. */
+static ssize_t
+version_product_show(
+	struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR_RO(version_product);
+
+/* sysfs: persistent NVDATA version from IO unit page 0. */
+static ssize_t
+version_nvdata_persistent_show(struct device *cdev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+			le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR_RO(version_nvdata_persistent);
+
+/* sysfs: default NVDATA version from IO unit page 0. */
+static ssize_t
+version_nvdata_default_show(struct device *cdev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+			le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR_RO(version_nvdata_default);
+
+/* sysfs: board name from manufacturing page 0. */
+static ssize_t
+board_name_show(
+	struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR_RO(board_name);
+
+/* sysfs: board assembly number from manufacturing page 0. */
+static ssize_t
+board_assembly_show(
+	struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct LEAPIORAID_ADAPTER *ioc =
leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); +} +static DEVICE_ATTR_RO(board_assembly); + +static ssize_t +board_tracer_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); +} +static DEVICE_ATTR_RO(board_tracer); + +static ssize_t +io_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); +} +static DEVICE_ATTR_RO(io_delay); + +static ssize_t +device_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR_RO(device_delay); + +static ssize_t +fw_queue_depth_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +static ssize_t +host_sas_address_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->sas_hba.sas_address); +} +static DEVICE_ATTR_RO(host_sas_address); + +static ssize_t +logging_level_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host 
*shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); +} + +static ssize_t +logging_level_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->logging_level = val; + pr_info("%s logging_level=%08xh\n", ioc->name, + ioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +static ssize_t +fwfault_debug_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} + +static ssize_t +fwfault_debug_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->fwfault_debug = val; + pr_info("%s fwfault_debug=%d\n", ioc->name, + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR_RW(fwfault_debug); + +static +struct leapioraid_raid_device *leapioraid_ctl_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +u8 +leapioraid_ctl_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u8 rc; + unsigned long flags; + struct leapioraid_sas_device *sas_device; + struct 
leapioraid_raid_device *raid_device; + u16 smid_task_abort; + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + rc = 1; + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return rc; + } + handle = le16_to_cpu(mpi_reply->DevHandle); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + smid_task_abort = 0; + if (mpi_reply->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + smid_task_abort = le16_to_cpu(mpi_request->TaskMID); + } + pr_info("\tcomplete: sas_addr(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)sas_device->sas_address, handle, + (smid_task_abort ? smid_task_abort : smid), + le32_to_cpu(mpi_reply->TerminationCount)); + leapioraid_sas_device_put(sas_device); + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_ctl_raid_device_find_by_handle(ioc, handle); + if (raid_device) + pr_info("\tcomplete: wwid(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)raid_device->wwid, handle, + smid, le32_to_cpu(mpi_reply->TerminationCount)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + ioc->terminated_tm_count += le32_to_cpu(mpi_reply->TerminationCount); + if (ioc->out_of_frames) { + rc = 0; + leapioraid_base_free_smid(ioc, smid); + ioc->out_of_frames = 0; + wake_up(&ioc->no_frames_tm_wq); + } + ioc->pending_tm_count--; + if (!ioc->pending_tm_count) + wake_up(&ioc->pending_tm_wq); + return rc; +} + +static void +leapioraid_ctl_tm_sysfs(struct LEAPIORAID_ADAPTER *ioc, u8 task_type) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid, handle, hpr_smid; + struct LEAPIORAID_DEVICE *device_priv_data; + struct 
LEAPIORAID_TARGET *target_priv_data; + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + unsigned long flags; + int tm_count; + int lun; + u32 doorbell; + struct leapioraid_scsiio_tracker *st; + u8 tr_method = 0x00; + + if (list_empty(&ioc->sas_device_list)) + return; + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery || ioc->remove_host) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + pr_err( + "%s %s: busy : host reset in progress, try later\n", + ioc->name, __func__); + return; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + scsi_block_requests(ioc->shost); + init_waitqueue_head(&ioc->pending_tm_wq); + ioc->ignore_loginfos = 1; + ioc->pending_tm_count = 0; + ioc->terminated_tm_count = 0; + ioc->out_of_frames = 0; + tm_count = 0; + switch (task_type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + continue; + lun = scmd->device->lun; + device_priv_data = scmd->device->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + goto out_of_frames; + } + mpi_request = + 
leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mpi_request->TaskMID = cpu_to_le16(st->smid); + int_to_scsilun(lun, + (struct scsi_lun *)mpi_request->LUN); + starget_printk(KERN_INFO, + device_priv_data->sas_target->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long) + device_priv_data->sas_target->sas_address, handle, st->smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + goto fault_in_progress; + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + } + if (!sas_device->starget) + continue; + if (test_bit(sas_device->handle, ioc->pd_handles)) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + 
cpu_to_le16(sas_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + sas_device->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)sas_device->sas_address, + sas_device->handle, + hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->raid_device_lock, + flags); + } + if (!raid_device->starget) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(raid_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + raid_device->starget, + "sending tm: wwid(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)raid_device->wwid, + raid_device->handle, hpr_smid); + 
ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + shost_for_each_device(sdev, ioc->shost) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + device_priv_data = sdev->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if ((target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + && (task_type == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + scsi_device_put(sdev); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = task_type; + mpi_request->MsgFlags = tr_method; + int_to_scsilun(sdev->lun, (struct scsi_lun *) + mpi_request->LUN); + sdev_printk(KERN_INFO, sdev, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long 
long)target_priv_data->sas_address, + handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + scsi_device_put(sdev); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + } +out_of_frames: + if (ioc->pending_tm_count) + wait_event_timeout(ioc->pending_tm_wq, + !ioc->pending_tm_count, 30 * HZ); + pr_info("%s task management requests issued(%d)\n", + ioc->name, tm_count); + pr_info("%s number IO terminated(%d)\n", + ioc->name, ioc->terminated_tm_count); +fault_in_progress: + scsi_unblock_requests(ioc->shost); + ioc->ignore_loginfos = 0; +} + +static ssize_t +task_management_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int opcode = 0; + + if (kstrtoint(buf, 0, &opcode)) + return -EINVAL; + switch (opcode) { + case 1: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: diag reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER)) + ? "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 2: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: message unit reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + SOFT_RESET)) ? 
+ "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 3: + pr_err("%s sysfs: TASKTYPE_ABORT_TASK :\n", ioc->name); + ioc->got_task_abort_from_sysfs = 1; + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + ioc->got_task_abort_from_sysfs = 0; + break; + case 4: + pr_err("%s sysfs: TASKTYPE_TARGET_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET); + break; + case 5: + pr_err("%s sysfs: TASKTYPE_LOGICAL_UNIT_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); + break; + case 6: + pr_info("%s sysfs: TASKTYPE_ABRT_TASK_SET\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET); + break; + default: + pr_info("%s unsupported opcode(%d)\n", + ioc->name, opcode); + break; + }; + return strlen(buf); +} +static DEVICE_ATTR_WO(task_management); + +static ssize_t +ioc_reset_count_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); +} +static DEVICE_ATTR_RO(ioc_reset_count); + +static ssize_t +reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + if ((ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR_RO(reply_queue_count); + +static ssize_t +drv_support_bitmap_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + 
struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); +} +static DEVICE_ATTR_RO(drv_support_bitmap); + +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + struct leapioraid_raid_device *raid_device; + int qdepth; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + continue; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + raid_device = + leapioraid_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + LEAPIORAID_SAS_QUEUE_DEPTH; + else + qdepth = + LEAPIORAID_SATA_QUEUE_DEPTH; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + } + } else + qdepth = + (sas_target_priv_data->sas_dev->port_type > + 1) ? 
ioc->max_wideport_qd : ioc->max_narrowport_qd; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) { + leapioraid__scsihost_change_queue_depth(sdev, + shost->can_queue); + } + break; + default: + return -EINVAL; + } + return strlen(buf); +} +static DEVICE_ATTR_RW(enable_sdev_max_qd); + +static struct attribute *leapioraid_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_version_bios.attr, + &dev_attr_version_leapioraid.attr, + &dev_attr_version_product.attr, + &dev_attr_version_nvdata_persistent.attr, + &dev_attr_version_nvdata_default.attr, + &dev_attr_board_name.attr, + &dev_attr_board_assembly.attr, + &dev_attr_board_tracer.attr, + &dev_attr_io_delay.attr, + &dev_attr_device_delay.attr, + &dev_attr_logging_level.attr, + &dev_attr_fwfault_debug.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_task_management.attr, + &dev_attr_ioc_reset_count.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_drv_support_bitmap.attr, + &dev_attr_enable_sdev_max_qd.attr, + NULL, +}; + +static const struct attribute_group leapioraid_host_attr_group = { + .attrs = leapioraid_host_attrs +}; + +const struct attribute_group *leapioraid_host_groups[] = { + &leapioraid_host_attr_group, + NULL +}; + +static ssize_t +sas_address_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf( + buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR_RO(sas_address); + +static ssize_t +sas_device_handle_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, 
"0x%04x\n", + sas_device_priv_data->sas_target->handle); +} +static DEVICE_ATTR_RO(sas_device_handle); + +static ssize_t +sas_ncq_prio_enable_show( + struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + sas_device_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + int ncq_prio_enable = 0; + + if (kstrtoint(buf, 0, &ncq_prio_enable)) + return -EINVAL; + if (!leapioraid_scsihost_ncq_prio_supp(sdev)) + return -EINVAL; + sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + +static struct attribute *leapioraid_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_sas_device_handle.attr, + &dev_attr_sas_ncq_prio_enable.attr, + NULL, +}; +static const struct attribute_group leapioraid_dev_attr_group = { + .attrs = leapioraid_dev_attrs +}; +const struct attribute_group *leapioraid_dev_groups[] = { + &leapioraid_dev_attr_group, + NULL +}; + +static const struct +file_operations leapioraid_ctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = leapioraid_ctl_ioctl, + .poll = leapioraid_ctl_poll, + .fasync = leapioraid_ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = leapioraid_ctl_ioctl_compat, +#endif +}; + +static struct miscdevice leapioraid_ctl_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = LEAPIORAID_DEV_NAME, + .fops = &leapioraid_ctl_fops, +}; + +void leapioraid_ctl_init(void) +{ + leapioraid_async_queue = NULL; + if (misc_register(&leapioraid_ctl_dev) < 0) + pr_err("%s can't register misc device\n", + LEAPIORAID_DRIVER_NAME); + init_waitqueue_head(&leapioraid_ctl_poll_wait); +} 
+ +void leapioraid_ctl_exit(void) +{ + struct LEAPIORAID_ADAPTER *ioc; + + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + kfree(ioc->event_log); + } + misc_deregister(&leapioraid_ctl_dev); +} diff --git a/drivers/scsi/leapioraid/leapioraid_func.c b/drivers/scsi/leapioraid/leapioraid_func.c new file mode 100644 index 000000000000..2d80a86da007 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_func.c @@ -0,0 +1,7074 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" +#include +#include +#include + +static char *dest_ip = "127.0.0.1"; +module_param(dest_ip, charp, 0000); +MODULE_PARM_DESC(dest_ip, "Destination IP address"); + +static u16 port_no = 6666; +module_param(port_no, ushort, 0000); +MODULE_PARM_DESC(port_no, "Destination Port number"); +static struct sockaddr_in dest_addr; +static struct socket *sock; +static struct msghdr msg; + +#define LEAPIORAID_LOG_POLLING_INTERVAL 1 +static LEAPIORAID_CALLBACK leapioraid_callbacks[LEAPIORAID_MAX_CALLBACKS]; +#define LEAPIORAID_FAULT_POLLING_INTERVAL 1000 +#define LEAPIORAID_MAX_HBA_QUEUE_DEPTH 1024 + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, + "SMP affinity feature enable/disable Default: enable(1)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 
0444); +MODULE_PARM_DESC(max_msix_vectors, " max msix vectors"); + +static int irqpoll_weight = -1; +module_param(irqpoll_weight, int, 0444); +MODULE_PARM_DESC(irqpoll_weight, + "irq poll weight (default= one fourth of HBA queue depth)"); + +static int leapioraid_fwfault_debug; + +static int perf_mode = -1; + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, + "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. &\n\t\t"); + +enum leapioraid_perf_mode { + LEAPIORAID_PERF_MODE_DEFAULT = -1, + LEAPIORAID_PERF_MODE_BALANCED = 0, + LEAPIORAID_PERF_MODE_IOPS = 1, + LEAPIORAID_PERF_MODE_LATENCY = 2, +}; + +static void +leapioraid_base_clear_outstanding_leapioraid_commands( + struct LEAPIORAID_ADAPTER *ioc); +static +int leapioraid_base_wait_on_iocstate(struct LEAPIORAID_ADAPTER *ioc, + u32 ioc_state, int timeout); + +static int +leapioraid_scsihost_set_fwfault_debug( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting fwfault_debug(%d)\n", + leapioraid_fwfault_debug); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->fwfault_debug = leapioraid_fwfault_debug; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call( + leapioraid_fwfault_debug, + leapioraid_scsihost_set_fwfault_debug, + param_get_int, &leapioraid_fwfault_debug, 0644); + +static inline u32 +leapioraid_base_readl_aero( + const void __iomem *addr, u8 retry_count) +{ + u32 i = 0, ret_val; + + do { + ret_val = readl(addr); + i++; + } while (ret_val == 0 && i < retry_count); + return ret_val; +} + +u8 +leapioraid_base_check_cmd_timeout( + struct LEAPIORAID_ADAPTER *ioc, + U8 status, void *mpi_request, int sz) +{ + u8 issue_reset = 0; 
+ + if (!(status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + pr_err("%s Command %s\n", ioc->name, + ((issue_reset == + 0) ? "terminated due to Host Reset" : "Timeout")); + leapioraid_debug_dump_mf(mpi_request, sz); + return issue_reset; +} + +static int +leapioraid_remove_dead_ioc_func(void *arg) +{ + struct LEAPIORAID_ADAPTER *ioc = (struct LEAPIORAID_ADAPTER *)arg; + struct pci_dev *pdev; + + if (ioc == NULL) + return -1; + pdev = ioc->pdev; + if (pdev == NULL) + return -1; +#if defined(DISABLE_RESET_SUPPORT) + ssleep(2); +#endif + + pci_stop_and_remove_bus_device(pdev); + return 0; +} + +u8 +leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + struct pci_bus *bus = pdev->bus; + int devfn = pdev->devfn; + u32 vendor_id; + + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor_id)) + return 1; + if (vendor_id == 0xffffffff || vendor_id == 0x00000000 || + vendor_id == 0x0000ffff || vendor_id == 0xffff0000) + return 1; + if ((vendor_id & 0xffff) == 0x0001) + return 1; + return 0; +} + +u8 +leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->pci_error_recovery + || leapioraid_base_pci_device_is_unplugged(ioc)) + return 0; + return 1; +} + +static void +leapioraid_base_sync_drv_fw_timestamp(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIoUnitControlReq_t *mpi_request; + struct LeapioraidIoUnitControlRep_t *mpi_reply; + u16 smid; + ktime_t current_time; + u64 TimeStamp = 0; + u8 issue_reset = 0; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s: scsih_cmd in use %s\n", ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err("%s: failed obtaining a smid %s\n", ioc->name, __func__); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; + } + mpi_request = 
leapioraid_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
+	mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
+	mpi_request->Operation = 0x0F;
+	mpi_request->IOCParameter = 0x81;
+	current_time = ktime_get_real();
+	TimeStamp = ktime_to_ms(current_time);
+	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
+	mpi_request->IOCParameterValue2 = cpu_to_le32(TimeStamp >> 32);
+	init_completion(&ioc->scsih_cmds.done);
+	ioc->put_smid_default(ioc, smid);
+	dinitprintk(ioc, pr_err(
+		"%s Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
+		ioc->name, TimeStamp));
+	wait_for_completion_timeout(&ioc->scsih_cmds.done,
+				    10 * HZ);
+	if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
+		/*
+		 * Fix: the dump length must describe the request actually
+		 * built above (LeapioraidIoUnitControlReq_t); the original
+		 * passed sizeof(struct LeapioraidSasIoUnitControlReq_t).
+		 * NOTE(review): leapioraid_check_cmd_timeout() is presumably
+		 * a macro that assigns 'issue_reset' (as in mpt3sas) -
+		 * confirm, otherwise the host reset below can never fire.
+		 */
+		leapioraid_check_cmd_timeout(ioc,
+					     ioc->scsih_cmds.status,
+					     mpi_request,
+					     sizeof
+					     (struct LeapioraidIoUnitControlReq_t)
+					     / 4, issue_reset);
+		goto issue_host_reset;
+	}
+	if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
+		mpi_reply = ioc->scsih_cmds.reply;
+		dinitprintk(ioc, pr_err(
+			"%s Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+			ioc->name,
+			le16_to_cpu(mpi_reply->IOCStatus),
+			le32_to_cpu(mpi_reply->IOCLogInfo)));
+	}
+issue_host_reset:
+	if (issue_reset)
+		leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+	ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
+out:
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/*
+ * leapioraid_udp_init - create the UDP socket used to stream the PCIe
+ * trace log and pre-build the destination address and message header.
+ *
+ * Return: 0 on success (or when the socket already exists), otherwise the
+ * negative errno from sock_create_kern().
+ */
+static int
+leapioraid_udp_init(void)
+{
+	int ret;
+	u32 ip;
+
+	if (sock)
+		return 0;
+	if (!in4_pton(dest_ip, -1, (u8 *) &ip, -1, NULL)) {
+		pr_err("Invalid IP address: %s, set to default: 127.0.0.1\n",
+		       dest_ip);
+		dest_ip = "127.0.0.1";
+		/* Fix: 'ip' was left uninitialized on this fallback path */
+		ip = htonl(INADDR_LOOPBACK);
+	}
+	ret =
+	    sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
+			     &sock);
+	if (ret) {
+		/* Fix: do not build the header around a failed socket */
+		sock = NULL;
+		return ret;
+	}
+	memset(&dest_addr, 0, sizeof(dest_addr));
+	dest_addr.sin_family = AF_INET;
+	dest_addr.sin_addr.s_addr = ip;
+	dest_addr.sin_port = htons(port_no);
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_name = &dest_addr;
+	msg.msg_namelen = sizeof(struct sockaddr_in);
+	return ret;
+}
+
+/* Release the trace-log socket, if any. */
+static void
+leapioraid_udp_exit(void)
+{
+	if (sock) {
+		sock_release(sock);
+		/*
+		 * Fix: clear the stale pointer so a later re-init creates a
+		 * fresh socket and a second call cannot double-release.
+		 */
+		sock = NULL;
+	}
+}
+
+/*
+ * leapioraid_send_udp_pkg - send one chunk of the trace log.
+ * @buf: payload start
+ * @datasize: payload length in bytes
+ *
+ * Return: number of bytes sent, or 0 on failure (caller retries later).
+ */
+static int
+leapioraid_send_udp_pkg(void *buf, U32 datasize)
+{
+	int ret;
+	struct kvec vec;
+
+	/* Fix: guard against a socket that failed to initialize */
+	if (!sock)
+		return 0;
+	vec.iov_len = datasize;
+	vec.iov_base = buf;
+	ret = kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
+	if (ret <= 0) {
+		pr_err_ratelimited("Sending UDP packet failed: errorno = %d\n",
+				   ret);
+		return 0;
+	}
+	return ret;
+}
+
+/*
+ * leapioraid_base_pcie_log_work - delayed work that drains the firmware
+ * log ring (chip registers mark producer/consumer positions) and streams
+ * it out over UDP, then re-arms itself.
+ */
+static void
+leapioraid_base_pcie_log_work(struct work_struct *work)
+{
+	struct LEAPIORAID_ADAPTER *ioc =
+	    container_of(work, struct LEAPIORAID_ADAPTER, pcie_log_work.work);
+	unsigned long flags;
+	u32 host_logbuf_position, ioc_logbuf_position;
+	u32 datasize, offset, send_sz, actual_send_sz;
+
+	while (true) {
+		host_logbuf_position =
+		    ioc->base_readl(&ioc->chip->HostLogBufPosition, 0);
+		ioc_logbuf_position =
+		    ioc->base_readl(&ioc->chip->IocLogBufPosition, 0);
+		datasize = ioc_logbuf_position - host_logbuf_position;
+		offset = host_logbuf_position % SYS_LOG_BUF_SIZE;
+		if (datasize == 0) {
+			goto rearm_timer;
+		} else if (datasize > SYS_LOG_BUF_SIZE) {
+			pr_err("log thread error:data size overflow\n");
+			return;
+		}
+
+		/* clamp to the ring end, then to the UDP payload limit */
+		if (offset + datasize > SYS_LOG_BUF_SIZE)
+			send_sz = SYS_LOG_BUF_SIZE - offset;
+		else
+			send_sz = datasize;
+
+		if (send_sz > MAX_UPD_PAYLOAD_SZ)
+			send_sz = MAX_UPD_PAYLOAD_SZ;
+
+		actual_send_sz =
+		    leapioraid_send_udp_pkg(ioc->log_buffer + offset, send_sz);
+		/*
+		 * Fix: a failed send returns 0; retry on the next poll
+		 * instead of busy-looping forever in the work item.
+		 */
+		if (actual_send_sz == 0)
+			goto rearm_timer;
+		host_logbuf_position += actual_send_sz;
+		writel(host_logbuf_position, &ioc->chip->HostLogBufPosition);
+	}
+rearm_timer:
+	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+	if (ioc->pcie_log_work_q)
+		queue_delayed_work(ioc->pcie_log_work_q,
+				   &ioc->pcie_log_work,
+				   msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL));
+	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+void
+leapioraid_base_start_log_watchdog(struct 
LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->pcie_log_work_q) + return; + leapioraid_udp_init(); + INIT_DELAYED_WORK(&ioc->pcie_log_work, leapioraid_base_pcie_log_work); + snprintf(ioc->pcie_log_work_q_name, + sizeof(ioc->pcie_log_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->pcie_log_work_q = + create_singlethread_workqueue(ioc->pcie_log_work_q_name); + if (!ioc->pcie_log_work_q) { + pr_err("%s %s: failed (line=%d)\n", ioc->name, + __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->pcie_log_work_q) + queue_delayed_work(ioc->pcie_log_work_q, + &ioc->pcie_log_work, + msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +void +leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->pcie_log_work_q; + ioc->pcie_log_work_q = NULL; + leapioraid_udp_exit(); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->pcie_log_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_fault_reset_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery || ioc->remove_host) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_MASK) { + pr_err( + "%s SAS host is non-operational !!!!\n", ioc->name); + if 
(ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + ioc->remove_host = 1; + leapioraid_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(leapioraid_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + pr_err( + "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err( + "%s %s: Running leapioraid_dead_ioc thread success !!!!\n", + ioc->name, __func__); + return; + } + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + 15; + timeout /= (LEAPIORAID_FAULT_POLLING_INTERVAL / 1000); + if (ioc->ioc_coredump_loop == 0) { + leapioraid_base_coredump_info(ioc, doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, + flags); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands + (ioc); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } + drsprintk(ioc, + pr_info("%s %s: CoreDump loop %d.", + ioc->name, __func__, ioc->ioc_coredump_loop)); + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + } + if (ioc->ioc_coredump_loop) { + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_COREDUMP) + pr_err( + "%s %s: CoreDump completed. LoopCount: %d", + ioc->name, __func__, ioc->ioc_coredump_loop); + else + pr_err( + "%s %s: CoreDump Timed out. 
LoopCount: %d", + ioc->name, __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = 0xFF; + } + ioc->non_operational_loop = 0; + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) { + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_warn("%s %s: hard reset: %s\n", ioc->name, + __func__, (rc == 0) ? "success" : "failed"); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + if (rc + && (doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) + return; + } + ioc->ioc_coredump_loop = 0; + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + leapioraid_base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); +rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +static void +leapioraid_base_hba_hot_unplug_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + hba_hot_unplug_work.work); + unsigned long flags; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->shost_recovery || ioc->pci_error_recovery) + goto rearm_timer; + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + if (ioc->remove_host) { + pr_err("%s The host is removeing!!!\n", + ioc->name); + goto rearm_timer; + } + ioc->remove_host = 1; + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + 
leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } +rearm_timer: + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies + (1000)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + ioc->timestamp_update_count = 0; + INIT_DELAYED_WORK(&ioc->fault_reset_work, + leapioraid_base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->open_pcie_trace) + leapioraid_base_start_log_watchdog(ioc); +} + +void +leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } + if (ioc->open_pcie_trace) + leapioraid_base_stop_log_watchdog(ioc); +} + +void +leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->hba_hot_unplug_work_q) + 
return; + INIT_DELAYED_WORK(&ioc->hba_hot_unplug_work, + leapioraid_base_hba_hot_unplug_work); + snprintf(ioc->hba_hot_unplug_work_q_name, + sizeof(ioc->hba_hot_unplug_work_q_name), + "poll_%s%u_hba_unplug", ioc->driver_name, ioc->id); + ioc->hba_hot_unplug_work_q = + create_singlethread_workqueue(ioc->hba_hot_unplug_work_q_name); + if (!ioc->hba_hot_unplug_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + wq = ioc->hba_hot_unplug_work_q; + ioc->hba_hot_unplug_work_q = NULL; + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->hba_hot_unplug_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_stop_smart_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + struct workqueue_struct *wq; + + wq = ioc->smart_poll_work_q; + ioc->smart_poll_work_q = NULL; + if (wq) { + if (!cancel_delayed_work(&ioc->smart_poll_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +void +leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +void +leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s coredump_state(0x%04x)!\n", + ioc->name, fault_code); +} + +int +leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = + (ioc->manu_pg11.CoreDumpTOSec) ? 
ioc->manu_pg11.CoreDumpTOSec : 15; + int ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + pr_err("%s %s: CoreDump timed out. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + else + pr_info("%s %s: CoreDump completed. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + return ioc_state; +} + +void +leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault) +{ + u32 doorbell; + + if ((!ioc->fwfault_debug) && (!set_fault)) + return; + if (!set_fault) + dump_stack(); + doorbell = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) + == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, doorbell); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + if (!set_fault) + pr_err("%s Firmware is halted due to command timeout\n", + ioc->name); + } + if (set_fault) + return; + if (ioc->fwfault_debug == 2) { + for (;;) + ; + } else + panic("panic in %s\n", __func__); +} + +static void +leapioraid_base_group_cpus_on_irq(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + unsigned int i, cpu, group, nr_cpus, nr_msix, index = 0; + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int unmanaged_q_count = ioc->high_iops_queues + iopoll_q_count; + + cpu = cpumask_first(cpu_online_mask); + nr_msix = ioc->reply_queue_count - unmanaged_q_count; + nr_cpus = num_online_cpus(); + group = nr_cpus / nr_msix; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + if (cpu >= nr_cpus) + break; + if (index < nr_cpus % nr_msix) + group++; + for (i = 0; i < group; i++) { 
+ ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} + +static void +leapioraid_base_sas_ioc_info(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidDefaultRep_t *mpi_reply, + struct LeapioraidReqHeader_t *request_hdr) +{ + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + char *desc = NULL; + u16 frame_sz; + char *func_str = NULL; + + if (request_hdr->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + request_hdr->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || request_hdr->Function == LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return; + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_BUSY: + desc = "busy"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config can not commit"; + 
break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp guard error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp ref tag error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp app tag error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target no conn retryable"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp 
request failed"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + default: + break; + } + if (!desc) + return; + switch (request_hdr->Function) { + case LEAPIORAID_FUNC_CONFIG: + frame_sz = sizeof(struct LeapioraidCfgReq_t) + ioc->sge_size; + func_str = "config_page"; + break; + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + frame_sz = sizeof(struct LeapioraidSCSITmgReq_t); + func_str = "task_mgmt"; + break; + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(struct LeapioraidSasIoUnitControlReq_t); + func_str = "sas_iounit_ctl"; + break; + case LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(struct LeapioraidSepReq_t); + func_str = "enclosure"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + frame_sz = sizeof(struct LeapioraidIOCInitReq_t); + func_str = "ioc_init"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + frame_sz = sizeof(struct LeapioraidPortEnableReq_t); + func_str = "port_enable"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + frame_sz = + sizeof(struct LeapioraidSmpPassthroughReq_t) + ioc->sge_size; + func_str = "smp_passthru"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + pr_warn("%s ioc_status: %s(0x%04x), request(0x%p), (%s)\n", + ioc->name, desc, ioc_status, request_hdr, func_str); + leapioraid_debug_dump_mf(request_hdr, frame_sz / 4); +} + +static void +leapioraid_base_display_event_data(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_EVENTS)) + return; + event = le16_to_cpu(mpi_reply->Event); + if (ioc->warpdrive_msg) { + switch (event) { + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + case LEAPIORAID_EVENT_IR_VOLUME: + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + return; + } + } + switch (event) { + case LEAPIORAID_EVENT_LOG_DATA: + desc = 
"Log Data"; + break; + case LEAPIORAID_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case LEAPIORAID_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case LEAPIORAID_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + desc = "IR Operation Status"; + break; + case LEAPIORAID_EVENT_SAS_DISCOVERY: + { + struct LeapioraidEventDataSasDiscovery_t *event_data = + (struct LeapioraidEventDataSasDiscovery_t *) mpi_reply->EventData; + pr_info("%s SAS Discovery: (%s)", + ioc->name, + (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" : + "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + return; + } + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_VOLUME: + desc = "IR Volume"; + break; + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + desc = "IR Physical Disk"; + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + desc = "IR Configuration Change List"; + break; + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + desc = "Log Entry Added"; + break; + case LEAPIORAID_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + } + if (!desc) + return; + pr_info("%s %s\n", ioc->name, desc); +} + +static void 
+leapioraid_base_sas_log_info(struct LEAPIORAID_ADAPTER *ioc, u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3) + return; + if (log_info == 0x31170000) + return; + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if (ioc->warpdrive_msg) + originator_str = "WarpDrive"; + else + originator_str = "IR"; + break; + } + pr_warn("%s log_info(0x%08x):\n\t\t" + "originator(%s), code(0x%02x), sub_code(0x%04x)\n", + ioc->name, + log_info, + originator_str, + sas_loginfo.dw.code, + sas_loginfo.dw.subcode); +} + +static void +leapioraid_base_display_reply_info(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) && + (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) { + leapioraid_base_sas_ioc_info(ioc, mpi_reply, + leapioraid_base_get_msg_frame(ioc, + smid)); + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + leapioraid_base_sas_log_info(ioc, loginfo); + } +} + +u8 +leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if 
(mpi_reply && mpi_reply->Function == LEAPIORAID_FUNC_EVENT_ACK) + return leapioraid_check_for_pending_internal_cmds(ioc, smid); + if (ioc->base_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + ioc->base_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->base_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->base_cmds.done); + return 1; +} + +static u8 +leapioraid_base_async_event( + struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + struct LeapioraidEventNotificationRep_t *mpi_reply; + struct LeapioraidEventAckReq_t *ack_request; + u16 smid; + struct leapioraid_event_ack_list *delayed_event_ack; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return 1; + leapioraid_base_display_event_data(ioc, mpi_reply); + if (!(mpi_reply->AckRequired & LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + delayed_event_ack = + kzalloc(sizeof(*delayed_event_ack), GFP_ATOMIC); + if (!delayed_event_ack) + goto out; + INIT_LIST_HEAD(&delayed_event_ack->list); + delayed_event_ack->Event = mpi_reply->Event; + delayed_event_ack->EventContext = mpi_reply->EventContext; + list_add_tail(&delayed_event_ack->list, + &ioc->delayed_event_ack_list); + dewtprintk(ioc, pr_err( + "%s DELAYED: EVENT ACK: event (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_reply->Event))); + goto out; + } + ack_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t)); + ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); 
+out: + leapioraid_scsihost_event_callback(ioc, msix_index, reply); + leapioraid_ctl_event_callback(ioc, msix_index, reply); + return 1; +} + +inline +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct scsi_cmnd *scmd) +{ + return scsi_cmd_priv(scmd); +} + +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *cmd; + + if (WARN_ON(!smid) || WARN_ON(smid >= ioc->hi_priority_smid)) + return NULL; + cmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (cmd) + return leapioraid_base_scsi_cmd_priv(cmd); + return NULL; +} + +static u8 +leapioraid_base_get_cb_idx(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + int i; + u16 ctl_smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL; + u16 discovery_smid = + ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + u8 cb_idx = 0xFF; + + if (smid < ioc->hi_priority_smid) { + struct leapioraid_scsiio_tracker *st; + + if (smid < ctl_smid) { + st = leapioraid_get_st_from_smid(ioc, smid); + if (st) + cb_idx = st->cb_idx; + } else if (smid < discovery_smid) + cb_idx = ioc->ctl_cb_idx; + else + cb_idx = ioc->scsih_cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } + return cb_idx; +} + +void +leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 1); + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->blk_mq_poll_queues[qid].busy)) { + cpu_relax(); + udelay(500); + } + } +} + +void +leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - 
ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 0); +} + +void +leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + him_register |= + 0x00000001 + 0x00000008 + 0x40000000; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); +} + +void +leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 him_register; + + him_register = + ioc->base_readl(&ioc->chip->HostInterruptMask, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE); + him_register &= ~0x00000008; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union leapioraid_reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +static int +leapioraid_base_process_reply_queue( + struct leapioraid_adapter_reply_queue *reply_q) +{ + union leapioraid_reply_descriptor rd; + u64 completed_cmds; + u8 request_descript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc; + union LeapioraidRepDescUnion_t *rpf; + u8 rc; + + completed_cmds = 0; + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return completed_cmds; + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_descript_type = rpf->Default.ReplyFlags + & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return 1; + } + cb_idx = 0xFF; + do { + rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_descript_type == + 
LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_descript_type == + LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { + cb_idx = leapioraid_base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) && + (likely(leapioraid_callbacks[cb_idx] != NULL))) { + rc = leapioraid_callbacks[cb_idx] (ioc, smid, + msix_index, 0); + if (rc) + leapioraid_base_free_smid(ioc, smid); + } + } else if (request_descript_type == + LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = + le32_to_cpu(rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address + || reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = leapioraid_base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) && + (likely(leapioraid_callbacks[cb_idx] != NULL))) { + rc = leapioraid_callbacks[cb_idx] (ioc, + smid, + msix_index, + reply); + if (reply) + leapioraid_base_display_reply_info + (ioc, smid, msix_index, + reply); + if (rc) + leapioraid_base_free_smid(ioc, + smid); + } + } else { + leapioraid_base_async_event(ioc, msix_index, reply); + } + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + wmb(); /* Make sure that all write ops are in order */ + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 
0 : + reply_q->reply_post_host_index + 1; + request_descript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index].Default.ReplyFlags + & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK; + completed_cmds++; + if (completed_cmds >= ioc->thresh_hold) { + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | + ((msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index / + 8]); + } else { + writel(reply_q->reply_post_host_index | + (msix_index << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + if (!reply_q->is_blk_mq_poll_q && + !reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = true; + irq_poll_sched(&reply_q->irqpoll); + } + atomic_dec(&reply_q->busy); + return completed_cmds; + } + if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) + goto out; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; + else + rpf++; + } while (1); +out: + if (!completed_cmds) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + wmb(); /* Make sure that all write ops are in order */ + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | ((msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index / 8]); + } else { + writel(reply_q->reply_post_host_index | (msix_index << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + atomic_dec(&reply_q->busy); + return completed_cmds; +} + +int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct LEAPIORAID_ADAPTER *ioc = + (struct LEAPIORAID_ADAPTER *)shost->hostdata; + struct leapioraid_adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->blk_mq_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->blk_mq_poll_queues[qid].busy, 1, 1)) + return 0; + reply_q = ioc->blk_mq_poll_queues[qid].reply_q; + 
num_entries = leapioraid_base_process_reply_queue(reply_q); + atomic_dec(&ioc->blk_mq_poll_queues[qid].busy); + return num_entries; +} + +static irqreturn_t +leapioraid_base_interrupt(int irq, void *bus_id) +{ + struct leapioraid_adapter_reply_queue *reply_q = bus_id; + struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + if (reply_q->irq_poll_scheduled) + return IRQ_HANDLED; + return ((leapioraid_base_process_reply_queue(reply_q) > 0) ? + IRQ_HANDLED : IRQ_NONE); +} + +static +int leapioraid_base_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int num_entries = 0; + + reply_q = container_of(irqpoll, + struct leapioraid_adapter_reply_queue, irqpoll); + if (reply_q->irq_line_enable) { + disable_irq_nosync(reply_q->os_irq); + reply_q->irq_line_enable = false; + } + num_entries = leapioraid_base_process_reply_queue(reply_q); + if (num_entries < budget) { + irq_poll_complete(irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } + return num_entries; +} + +static void +leapioraid_base_init_irqpolls(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_blk_mq_poll_q) + continue; + irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold, + leapioraid_base_irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + reply_q->os_irq = pci_irq_vector(ioc->pdev, + reply_q->msix_index); + } +} + +static inline int +leapioraid_base_is_controller_msix_enabled(struct LEAPIORAID_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +void +leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll) +{ + struct 
leapioraid_adapter_reply_queue *reply_q;

	if (!leapioraid_base_is_controller_msix_enabled(ioc))
		return;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/* Abort the scan once the adapter is being recovered,
		 * removed, or is in PCI error recovery.
		 */
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;
		/* msix_index 0 is intentionally skipped here -
		 * NOTE(review): presumably handled by the caller; confirm.
		 */
		if (reply_q->msix_index == 0)
			continue;
		/* blk-mq poll queues have no IRQ line; drain them directly. */
		if (reply_q->is_blk_mq_poll_q) {
			leapioraid_base_process_reply_queue(reply_q);
			continue;
		}
		/* Wait for any in-flight hard-IRQ handler to complete. */
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
		if (reply_q->irq_poll_scheduled) {
			/* Flush a pending irq_poll pass; if it is still
			 * marked scheduled afterwards, clear the flag and
			 * re-enable the IRQ line ourselves.
			 */
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			if (reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = false;
				reply_q->irq_line_enable = true;
				enable_irq(reply_q->os_irq);
			}
		}
		/* Optionally drain the queue once more after syncing. */
		if (poll)
			leapioraid_base_process_reply_queue(reply_q);
	}
}

/* Release the callback slot @cb_idx so it can be handed out again. */
void
leapioraid_base_release_callback_handler(u8 cb_idx)
{
	leapioraid_callbacks[cb_idx] = NULL;
}

/*
 * Register @cb_func in the first free slot, scanning from the top of
 * the table down to index 1. Returns the slot index used.
 * NOTE(review): if the table is full the loop ends with cb_idx == 0 and
 * slot 0 is overwritten - presumably slot 0 is reserved; confirm.
 */
u8
leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = LEAPIORAID_MAX_CALLBACKS - 1; cb_idx; cb_idx--)
		if (leapioraid_callbacks[cb_idx] == NULL)
			break;
	leapioraid_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/* Clear the entire callback table back to the unregistered state. */
void
leapioraid_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < LEAPIORAID_MAX_CALLBACKS; cb_idx++)
		leapioraid_base_release_callback_handler(cb_idx);
}

/*
 * Emit a zero-length scatter-gather element at @paddr (simple element
 * with last/end-of-buffer/end-of-list flags and address -1), using the
 * 32- or 64-bit writer selected in ioc->base_add_sg_single.
 */
static void
leapioraid_base_build_zero_len_sge(
	struct LEAPIORAID_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32) ((LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
				   LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
				   LEAPIORAID_SGE_FLAGS_END_OF_LIST |
				   LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT) <<
				  LEAPIORAID_SGE_FLAGS_SHIFT);

	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/* Write a single 32-bit-addressed simple SGE at @paddr. */
static void
leapioraid_base_add_sg_single_32(void *paddr, u32 flags_length,
				 dma_addr_t dma_addr)
{
	struct LeapioSGESimple32_t *sgel = paddr;

	flags_length |= (LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING |
			 LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS)
<< + LEAPIORAID_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + +static void +leapioraid_base_add_sg_single_64(void *paddr, u32 flags_length, + dma_addr_t dma_addr) +{ + struct LeapioSGESimple64_t *sgel = paddr; + + flags_length |= (LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING | + LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) << + LEAPIORAID_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +static +struct leapioraid_chain_tracker *leapioraid_base_get_chain_buffer_tracker( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct leapioraid_chain_tracker *chain_req; + struct leapioraid_scsiio_tracker *st = leapioraid_base_scsi_cmd_priv(scmd); + u16 smid = st->smid; + u8 chain_offset = + atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); + + if (chain_offset == ioc->chains_needed_per_io) + return NULL; + chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; + atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); + return chain_req; +} + +static void +leapioraid_base_build_sg(struct LEAPIORAID_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + leapioraid_base_build_zero_len_sge(ioc, psge); + return; + } + if (data_out_sz && data_in_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + psge += ioc->sge_size; + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, 
data_in_dma); + } else if (data_out_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) { + sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | + LEAPIORAID_SGE_FLAGS_END_OF_BUFFER | + LEAPIORAID_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +u32 +leapioraid_base_mod64(u64 dividend, u32 divisor) +{ + u32 remainder; + + if (!divisor) { + pr_err("leapioraid : DIVISOR is zero, in div fn\n"); + return 0; + } + remainder = do_div(dividend, divisor); + return remainder; +} + +static void +leapioraid_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, + u32 length, dma_addr_t dma_addr) +{ + struct LEAPIORAID_IEEE_SGE_CHAIN64 *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +static void +leapioraid_base_build_zero_len_sge_ieee(struct LEAPIORAID_ADAPTER *ioc, + void *paddr) +{ + u8 sgl_flags = (LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST); + + leapioraid_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +static int +leapioraid_base_build_sg_scmd_ieee(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid) +{ + struct LeapioraidSCSIIOReq_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct 
leapioraid_chain_tracker *chain_req; + + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + simple_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) { + pr_err_ratelimited + ("sd %s: scsi_dma_map failed: request for %d bytes!\n", + dev_name(&scmd->device->sdev_gendev), scsi_bufflen(scmd)); + return -ENOMEM; + } + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(struct LeapioraidSCSIIOReq_t, + SGL)) / ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + mpi_request->ChainOffset = (sges_in_segment - 1) + + (offsetof(struct LeapioraidSCSIIOReq_t, SGL) / ioc->sge_size_ieee); + while (sges_in_segment > 1) { + leapioraid_base_add_sg_single_ieee(sg_local, simple_sgl_flags, + 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? 
+ 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + leapioraid_base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, + chain_dma); + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + while (sges_in_segment) { + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags, 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); +fill_in_last_segment: + while (sges_left > 0) { + if (sges_left == 1) + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, + 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + else + leapioraid_base_add_sg_single_ieee(sg_local, + simple_sgl_flags, 0, + sg_dma_len(sg_scmd), + sg_dma_address + (sg_scmd)); + + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + return 0; +} + +static void +leapioraid_base_build_sg_ieee(struct LEAPIORAID_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + leapioraid_base_build_zero_len_sge_ieee(ioc, psge); + return; + } + if (data_out_sz && data_in_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_out_sz, data_out_dma); + psge += ioc->sge_size_ieee; + sgl_flags |= LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_in_sz, data_in_dma); + } else if (data_out_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + 
LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_out_sz, data_out_dma); + } else if (data_in_sz) { + sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST | + LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR; + leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0, + data_in_sz, data_in_dma); + } +} + +#define leapioraid_convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) +static int +leapioraid_base_config_dma_addressing(struct LEAPIORAID_ADAPTER *ioc, + struct pci_dev *pdev) +{ + struct sysinfo s; + char *desc = "64"; + u64 consistant_dma_mask = DMA_BIT_MASK(64); + u64 dma_mask = DMA_BIT_MASK(64); + + consistant_dma_mask = DMA_BIT_MASK(63); + dma_mask = DMA_BIT_MASK(63); + desc = "63"; + ioc->dma_mask = 63; + if (ioc->use_32bit_dma) + consistant_dma_mask = DMA_BIT_MASK(32); + if (sizeof(dma_addr_t) > 4) { + if (!dma_set_mask(&pdev->dev, dma_mask) && + !dma_set_coherent_mask(&pdev->dev, consistant_dma_mask)) { + ioc->base_add_sg_single = + &leapioraid_base_add_sg_single_64; + ioc->sge_size = sizeof(struct LeapioSGESimple64_t); + if (!ioc->use_32bit_dma) + goto out; + return 0; + } + } + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) + && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { + ioc->base_add_sg_single = &leapioraid_base_add_sg_single_32; + ioc->sge_size = sizeof(struct LeapioSGESimple32_t); + desc = "32"; + ioc->dma_mask = 32; + } else + return -ENODEV; +out: + si_meminfo(&s); + pr_info("%s %s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, desc, leapioraid_convert_to_kb(s.totalram)); + return 0; +} + +int +leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev) +{ + int base; + u16 message_control, msix_vector_count; + + base = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!base) + return -EINVAL; + pci_read_config_word(pdev, base + 2, &message_control); + msix_vector_count = 
(message_control & 0x3FF) + 1;	/* Table Size is encoded as N-1 */
	return msix_vector_count;
}

/* PCIe link-speed codes reported via dev_info() below. */
enum leapioraid_pci_bus_speed {
	LEAPIORAID_PCIE_SPEED_2_5GT = 0x14,
	LEAPIORAID_PCIE_SPEED_5_0GT = 0x15,
	LEAPIORAID_PCIE_SPEED_8_0GT = 0x16,
	LEAPIORAID_PCIE_SPEED_16_0GT = 0x17,
	LEAPIORAID_PCI_SPEED_UNKNOWN = 0xff,
};

/* Indexed by the PCI_EXP_LNKSTA_CLS field of the Link Status register. */
const unsigned char leapioraid_pcie_link_speed[] = {
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCIE_SPEED_2_5GT,
	LEAPIORAID_PCIE_SPEED_5_0GT,
	LEAPIORAID_PCIE_SPEED_8_0GT,
	LEAPIORAID_PCIE_SPEED_16_0GT,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN
};

/*
 * Decide whether the dedicated high-IOPS reply queues should be used.
 * They are disabled for IOPS/latency perf modes and whenever blk-mq
 * poll queues are requested.
 */
static void
leapioraid_base_check_and_enable_high_iops_queues(
	struct LEAPIORAID_ADAPTER *ioc,
	int hba_msix_vector_count,
	int iopoll_q_count)
{
	u16 lnksta;
	enum leapioraid_pci_bus_speed speed;

	if (perf_mode == LEAPIORAID_PERF_MODE_IOPS ||
	    perf_mode == LEAPIORAID_PERF_MODE_LATENCY || iopoll_q_count) {
		ioc->high_iops_queues = 0;
		return;
	}
	if (perf_mode == LEAPIORAID_PERF_MODE_DEFAULT) {
		/* In default mode, only keep high-IOPS queues on fast links. */
		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
		speed = leapioraid_pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		dev_info(&ioc->pdev->dev, "PCIe device speed is %s\n",
			 speed == LEAPIORAID_PCIE_SPEED_2_5GT ? "2.5GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_5_0GT ? "5.0GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_8_0GT ? "8.0GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_16_0GT ?
"16.0GHz" : + "Unknown"); + if (speed < LEAPIORAID_PCIE_SPEED_16_0GT) { + ioc->high_iops_queues = 0; + return; + } + } + if (!reset_devices && + hba_msix_vector_count == LEAPIORAID_GEN35_MAX_MSIX_QUEUES && + num_online_cpus() >= LEAPIORAID_HIGH_IOPS_REPLY_QUEUES && + max_msix_vectors == -1) + ioc->high_iops_queues = LEAPIORAID_HIGH_IOPS_REPLY_QUEUES; + else + ioc->high_iops_queues = 0; +} + +void +leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; + pci_free_irq_vectors(ioc->pdev); + kfree(ioc->blk_mq_poll_queues); + ioc->msix_enable = 0; +} + +void +leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + if (reply_q->is_blk_mq_poll_q) { + kfree(reply_q); + continue; + } + irq_poll_disable(&reply_q->irqpoll); + if (ioc->smp_affinity_enable) + irq_set_affinity_hint(pci_irq_vector(ioc->pdev, + reply_q->msix_index), NULL); + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); + kfree(reply_q); + } +} + +static int +leapioraid_base_request_irq(struct LEAPIORAID_ADAPTER *ioc, u8 index) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int r; + u8 qid; + + reply_q = kzalloc(sizeof(struct leapioraid_adapter_reply_queue), + GFP_KERNEL); + if (!reply_q) + return -ENOMEM; + + reply_q->ioc = ioc; + reply_q->msix_index = index; + atomic_set(&reply_q->busy, 0); + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-mq-poll%u", + ioc->driver_name, ioc->id, qid); + reply_q->is_blk_mq_poll_q = 1; + ioc->blk_mq_poll_queues[qid].reply_q = reply_q; + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; + } + if (ioc->msix_enable) + snprintf(reply_q->name, 
LEAPIORAID_NAME_LENGTH, "%s%u-msix%u", + ioc->driver_name, ioc->id, index); + else + snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%d", + ioc->driver_name, ioc->id); + r = request_irq(pci_irq_vector(ioc->pdev, index), leapioraid_base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); + if (r) { + pr_err("%s unable to allocate interrupt %d!\n", reply_q->name, + pci_irq_vector(ioc->pdev, index)); + kfree(reply_q); + return -EBUSY; + } + + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} + +static int leapioraid_base_alloc_irq_vectors(struct LEAPIORAID_ADAPTER *ioc) +{ + int i, irq_flags = PCI_IRQ_MSIX; + struct irq_affinity desc = {.pre_vectors = ioc->high_iops_queues }; + struct irq_affinity *descp = &desc; + int nr_msix_vectors = ioc->iopoll_q_start_index; + + if (ioc->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + else + descp = NULL; + dinitprintk(ioc, pr_err( + "%s high_iops_queues: %d,\n\t\t" + "reply_queue_count: %d, nr_msix_vectors: %d\n", + ioc->name, + ioc->high_iops_queues, + ioc->reply_queue_count, + nr_msix_vectors)); + i = pci_alloc_irq_vectors_affinity( + ioc->pdev, + ioc->high_iops_queues, + nr_msix_vectors, irq_flags, descp); + return i; +} + +static int +leapioraid_base_enable_msix(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, i, msix_vector_count, local_max_msix_vectors; + int iopoll_q_count = 0; + + ioc->msix_load_balance = false; + msix_vector_count = + leapioraid_base_check_and_get_msix_vectors(ioc->pdev); + if (msix_vector_count <= 0) { + dfailprintk(ioc, pr_info("%s msix not supported\n", ioc->name)); + goto try_ioapic; + } + dinitprintk(ioc, pr_err( + "%s MSI-X vectors supported: %d, no of cores: %d\n", + ioc->name, msix_vector_count, ioc->cpu_count)); + ioc->reply_queue_count = min_t(int, ioc->cpu_count, msix_vector_count); + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) { + if (reset_devices) + local_max_msix_vectors = 1; + else + 
local_max_msix_vectors = 8; + } else + local_max_msix_vectors = max_msix_vectors; + if (local_max_msix_vectors == 0) + goto try_ioapic; + if (!ioc->combined_reply_queue) { + pr_err( + "%s combined reply queue is off, so enabling msix load balance\n", + ioc->name); + ioc->msix_load_balance = true; + } + if (ioc->msix_load_balance) + ioc->smp_affinity_enable = 0; + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + if (iopoll_q_count) { + ioc->blk_mq_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct + leapioraid_blk_mq_poll_queue), + GFP_KERNEL); + if (!ioc->blk_mq_poll_queues) + iopoll_q_count = 0; + } + leapioraid_base_check_and_enable_high_iops_queues(ioc, + msix_vector_count, + iopoll_q_count); + ioc->reply_queue_count = + min_t(int, ioc->reply_queue_count + ioc->high_iops_queues, + msix_vector_count); + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + 1)) + iopoll_q_count = 0; + ioc->reply_queue_count = + min(ioc->reply_queue_count + iopoll_q_count, + msix_vector_count); + } + ioc->iopoll_q_start_index = ioc->reply_queue_count - iopoll_q_count; + r = leapioraid_base_alloc_irq_vectors(ioc); + if (r < 0) { + pr_warn( + "%s pci_alloc_irq_vectors failed (r=%d) !!!\n", + ioc->name, r); + goto try_ioapic; + } + ioc->msix_enable = 1; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = leapioraid_base_request_irq(ioc, i); + if (r) { + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); + goto try_ioapic; + } + } + dinitprintk(ioc, + pr_info("%s High IOPs queues : %s\n", + ioc->name, + ioc->high_iops_queues ? 
"enabled" : "disabled")); + return 0; +try_ioapic: + ioc->high_iops_queues = 0; + dinitprintk(ioc, pr_err( + "%s High IOPs queues : disabled\n", ioc->name)); + ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; + r = leapioraid_base_request_irq(ioc, 0); + return r; +} + +static void +leapioraid_base_import_managed_irqs_affinity( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + unsigned int cpu, nr_msix; + int local_numa_node; + unsigned int index = 0; + + nr_msix = ioc->reply_queue_count; + if (!nr_msix) + return; + if (ioc->smp_affinity_enable) { + if (ioc->high_iops_queues) { + local_numa_node = dev_to_node(&ioc->pdev->dev); + for (index = 0; index < ioc->high_iops_queues; index++) { + irq_set_affinity_hint(pci_irq_vector(ioc->pdev, + index), + cpumask_of_node + (local_numa_node)); + } + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + dinitprintk(ioc, pr_warn( + "%s no affinity for msi %x\n", + ioc->name, + reply_q->msix_index)); + goto fall_back; + } + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + } + return; + } +fall_back: + leapioraid_base_group_cpus_on_irq(ioc); +} + +static void +leapioraid_base_assign_reply_queues(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + int reply_queue; + + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + return; + if (ioc->msix_load_balance) + return; + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) { + ioc->reply_queue_count = ioc->facts.MaxMSIxVectors; + reply_queue = 0; + list_for_each_entry(reply_q, 
&ioc->reply_queue_list, list) {
			/* Re-distribute MSI-X indexes round-robin when the
			 * firmware supports fewer vectors than allocated.
			 */
			reply_q->msix_index = reply_queue;
			if (++reply_queue == ioc->reply_queue_count)
				reply_queue = 0;
		}
	}
	leapioraid_base_import_managed_irqs_affinity(ioc);
}

/*
 * Sleeping wait for the IOC-to-system doorbell interrupt status bit.
 * @timeout is in seconds (polled roughly once per millisecond).
 * Returns 0 once the bit is observed, -EFAULT on timeout.
 */
static int
leapioraid_base_wait_for_doorbell_int(
	struct LEAPIORAID_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		}
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
	pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
	       ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/*
 * Busy-wait variant of the above: udelay(500) per iteration with
 * 2000 * @timeout iterations, for contexts where sleeping is not
 * possible. Returns 0 on success, -EFAULT on timeout.
 */
static int
leapioraid_base_spin_on_doorbell_int(struct LEAPIORAID_ADAPTER *ioc,
				     int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 2000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		}
		udelay(500);
		count++;
	} while (--cntdn);
	pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
	       ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/*
 * Wait for the firmware to acknowledge a doorbell write, i.e. for the
 * system-to-IOC doorbell status bit to clear. While waiting, a raised
 * IOC-to-system bit is checked for FAULT/COREDUMP states, which abort
 * the wait with -EFAULT. @timeout is in seconds.
 */
static int
leapioraid_base_wait_for_doorbell_ack(struct LEAPIORAID_ADAPTER *ioc,
				      int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (!(int_status & LEAPIORAID_HIS_SYS2IOC_DB_STATUS)) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
ioc->name, __func__, count, + timeout)); + return 0; + } else if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) { + doorbell = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + usleep_range(1000, 1100); + count++; + } while (--cntdn); +out: + pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +static int +leapioraid_base_wait_for_doorbell_not_used(struct LEAPIORAID_ADAPTER *ioc, + int timeout) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = 1000 * timeout; + do { + doorbell_reg = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if (!(doorbell_reg & LEAPIORAID_DOORBELL_USED)) { + dhsprintk(ioc, pr_info( + "%s %s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, + timeout)); + return 0; + } + usleep_range(1000, 1100); + count++; + } while (--cntdn); + pr_err("%s %s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + ioc->name, __func__, count, doorbell_reg); + return -EFAULT; +} + +static int +leapioraid_base_handshake_req_reply_wait(struct LEAPIORAID_ADAPTER *ioc, + int request_bytes, u32 *request, + int reply_bytes, u16 *reply, + int timeout) +{ + struct LeapioraidDefaultRep_t *default_reply + = (struct LeapioraidDefaultRep_t *) reply; + int i; + u8 failed; + __le32 *mfp; + + if ((ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) & LEAPIORAID_DOORBELL_USED)) { + pr_err("%s doorbell is in use (line=%d)\n", ioc->name, __LINE__); + return -EFAULT; + } + if 
(ioc->base_readl(&ioc->chip->HostInterruptStatus, + LEAPIORAID_READL_RETRY_COUNT_OF_THREE) & + LEAPIORAID_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + writel(((LEAPIORAID_FUNC_HANDSHAKE << LEAPIORAID_DOORBELL_FUNCTION_SHIFT) + | ((request_bytes / 4) << LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + if ((leapioraid_base_spin_on_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) { + pr_err("%s doorbell handshake ack failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + for (i = 0, failed = 0; i < request_bytes / 4 && !failed; i++) { + writel((u32) (request[i]), &ioc->chip->Doorbell); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) + failed = 1; + } + if (failed) { + pr_err("%s doorbell handshake sending request failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if ((leapioraid_base_wait_for_doorbell_int(ioc, timeout))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + reply[0] = + (u16) (ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) + & LEAPIORAID_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + reply[1] = + (u16) (ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) + & LEAPIORAID_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) { + pr_err("%s doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if (i >= reply_bytes / 2) + 
	/*
	 * Tail of the doorbell handshake reply loop (the function head is
	 * earlier in the file, outside this hunk): each iteration pulls one
	 * 16-bit reply word out of the Doorbell register.  In the (not
	 * visible) if-branch the read result is discarded; in the else
	 * branch it is stored into reply[i].
	 */
		ioc->base_readl(&ioc->chip->Doorbell,
				LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
	else
		reply[i] =
		    (u16) (ioc->base_readl(&ioc->chip->Doorbell,
					   LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
			   & LEAPIORAID_DOORBELL_DATA_MASK);
		/* ack the per-word doorbell interrupt */
		writel(0, &ioc->chip->HostInterruptStatus);
	}
	/* final handshake interrupt must arrive within 5 seconds */
	if (leapioraid_base_wait_for_doorbell_int(ioc, 5)) {
		pr_err("%s doorbell handshake int failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	if (leapioraid_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc,
			  pr_info("%s doorbell is in use (line=%d)\n",
				  ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);
	/* with init-level debugging enabled, hex-dump the raw reply frame */
	if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) {
		mfp = (__le32 *) reply;
		pr_info("%s \toffset:data\n", ioc->name);
		for (i = 0; i < reply_bytes / 4; i++)
			pr_info("%s \t[0x%02x]:%08x\n",
				ioc->name, i * 4, le32_to_cpu(mfp[i]));
	}
	return 0;
}

/**
 * leapioraid_base_wait_on_iocstate - poll until the IOC reaches a state
 * @ioc: per-adapter object
 * @ioc_state: IOC state to wait for (LEAPIORAID_IOC_STATE_* value)
 * @timeout: timeout in seconds (polled in ~1 ms steps, 1000 polls/second)
 *
 * Polling stops early once FAULT is observed after the first iteration.
 *
 * Return: 0 when @ioc_state was reached, otherwise the last observed
 * (non-matching) state word.
 */
static int
leapioraid_base_wait_on_iocstate(
	struct LEAPIORAID_ADAPTER *ioc, u32 ioc_state,
	int timeout)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		current_state = leapioraid_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		if (count && current_state == LEAPIORAID_IOC_STATE_FAULT)
			break;
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
	return current_state;
}

/*
 * leapioraid_base_dump_reg_set - dump the first 256 bytes of the mapped
 * system register set to the log, one 32-bit register per line.
 */
static inline void
leapioraid_base_dump_reg_set(struct LEAPIORAID_ADAPTER *ioc)
{
	unsigned int i, sz = 256;
	u32 __iomem *reg = (u32 __iomem *) ioc->chip;

	pr_info("%s System Register set:\n", ioc->name);
	for (i = 0; i < (sz / sizeof(u32)); i++)
		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
}

/**
 * leapioraid_base_unlock_and_get_host_diagnostic - unlock diag register
 * @ioc: per-adapter object
 * @host_diagnostic: out: last value read from HostDiagnostic
 *
 * Writes the magic unlock sequence (0x0 F 4 B 2 7 D) to WriteSequence
 * until bit 0x80 ("diag writes enabled" - TODO confirm exact meaning)
 * appears in HostDiagnostic, retrying up to 20 times with 100 ms delays.
 *
 * Return: 0 on success, -EFAULT after 20 failed attempts.
 */
int
leapioraid_base_unlock_and_get_host_diagnostic(
	struct LEAPIORAID_ADAPTER *ioc,
	u32 *host_diagnostic)
{
	u32 count;

	*host_diagnostic = 0;
	count = 0;
	do {
		drsprintk(ioc, pr_info("%s write magic sequence\n", ioc->name));
		writel(0x0, &ioc->chip->WriteSequence);
		writel(0xF, &ioc->chip->WriteSequence);
		writel(0x4, &ioc->chip->WriteSequence);
		writel(0xB, &ioc->chip->WriteSequence);
		writel(0x2, &ioc->chip->WriteSequence);
		writel(0x7, &ioc->chip->WriteSequence);
		writel(0xD, &ioc->chip->WriteSequence);
		msleep(100);
		if (count++ > 20) {
			pr_err("%s Giving up writing magic sequence after 20 retries\n",
			       ioc->name);
			leapioraid_base_dump_reg_set(ioc);
			return -EFAULT;
		}
		*host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		drsprintk(ioc, pr_info(
			"%s wrote magic sequence: cnt(%d), host_diagnostic(0x%08x)\n",
			ioc->name, count, *host_diagnostic));
	} while ((*host_diagnostic & 0x00000080) == 0);
	return 0;
}

/* Re-lock the diagnostic register by clearing the write sequence. */
void
leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc)
{
	drsprintk(ioc, pr_info("%s disable writes to the diagnostic register\n",
			       ioc->name));
	writel(0x0, &ioc->chip->WriteSequence);
}

/*
 * leapioraid_base_diag_reset - hard-reset the controller via the
 * HostDiagnostic register and wait for firmware to return to READY.
 *
 * PCI config space access is locked for the duration of the reset.
 * Return: 0 on success, -EFAULT on any failure (continues past this hunk).
 */
static int
leapioraid_base_diag_reset(struct LEAPIORAID_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info("%s sending diag reset !!\n", ioc->name);
	drsprintk(ioc, pr_info("%s Locking pci cfg space access\n",
			       ioc->name));
	pci_cfg_access_lock(ioc->pdev);
	drsprintk(ioc, pr_info("%s clear interrupts\n", ioc->name));
	mutex_lock(&ioc->hostdiag_unlock_mutex);
	if (leapioraid_base_unlock_and_get_host_diagnostic
	    (ioc, &host_diagnostic)) {
		/*
		 * NOTE(review): the out: label at the end of this function
		 * also unlocks hostdiag_unlock_mutex, so this path appears
		 * to unlock the mutex twice - verify.
		 */
		mutex_unlock(&ioc->hostdiag_unlock_mutex);
		goto out;
	}
	hcb_size =
	    ioc->base_readl(&ioc->chip->HCBSize,
			    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
	drsprintk(ioc, pr_info("%s diag reset: issued\n", ioc->name));
	/* kick the reset; the reset bit self-clears when done */
	writel(host_diagnostic | LEAPIORAID_DIAG_RESET_ADAPTER,
	       &ioc->chip->HostDiagnostic);
#if defined(DISABLE_RESET_SUPPORT)
	count = 0;
	do {
		msleep(50);
		host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		if (host_diagnostic == 0xFFFFFFFF)	/* device gone */
			goto out;
		else if (count++ >= 300)	/* ~15 s poll budget */
			goto out;
		if (!(count % 20))
			pr_info("waiting on diag reset bit to clear, count = %d\n",
				(count / 20));
	} while (host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER);
#else
	msleep(50);
	/* poll up to ~300 s in 256 ms steps for the reset bit to clear */
	for (count = 0; count < (300000 / 256); count++) {
		host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		if (host_diagnostic == 0xFFFFFFFF) {
			pr_err("%s Invalid host diagnostic register value\n",
			       ioc->name);
			leapioraid_base_dump_reg_set(ioc);
			goto out;
		}
		if (!(host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER))
			break;

		msleep(256);
	}
#endif
	if (host_diagnostic & 0x00000100) {
		drsprintk(ioc, pr_info(
			"%s restart IOC assuming HCB Address points to good F/W\n",
			ioc->name));
		/* magic bit masks: presumably boot-device select - TODO confirm */
		host_diagnostic &= ~0x00001800;
		host_diagnostic |= 0x00000800;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_err(
			"%s re-enable the HCDW\n", ioc->name));
		writel(hcb_size | 0x00000001,
		       &ioc->chip->HCBSize);
	}
	drsprintk(ioc, pr_info("%s restart the adapter\n",
			       ioc->name));
	writel(host_diagnostic & ~0x00000002,
	       &ioc->chip->HostDiagnostic);
	leapioraid_base_lock_host_diagnostic(ioc);
	mutex_unlock(&ioc->hostdiag_unlock_mutex);
	drsprintk(ioc, pr_info("%s Wait for FW to go to the READY state\n",
			       ioc->name));
	ioc_state =
	    leapioraid_base_wait_on_iocstate(
		ioc, LEAPIORAID_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
		       ioc->name, __func__, ioc_state);
		leapioraid_base_dump_reg_set(ioc);
		goto out;
	}
	drsprintk(ioc, pr_err(
		"%s Unlocking pci cfg space access\n", ioc->name));
	pci_cfg_access_unlock(ioc->pdev);
	if (ioc->open_pcie_trace)
		leapioraid_base_trace_log_init(ioc);
	pr_info("%s diag reset: SUCCESS\n", ioc->name);
	return 0;
out:
	drsprintk(ioc, pr_err(
		"%s Unlocking pci cfg space access\n", ioc->name));
	pci_cfg_access_unlock(ioc->pdev);
	pr_err("%s diag reset: FAILED\n", ioc->name);
	/*
	 * NOTE(review): this out-path unlock is also reached via goto out
	 * from paths that already released hostdiag_unlock_mutex (magic
	 * sequence failure, post-unlock READY-wait failure) - possible
	 * double unlock; verify against the full function above.
	 */
	mutex_unlock(&ioc->hostdiag_unlock_mutex);
	return -EFAULT;
}

/**
 * leapioraid_base_wait_for_iocstate - bring the IOC to READY if needed
 * @ioc: per-adapter object
 * @timeout: seconds to wait for the READY state
 *
 * Returns 0 immediately when the IOC is already READY or OPERATIONAL.
 * An active doorbell or a FAULT state triggers a diag reset; COREDUMP
 * returns -EFAULT without resetting.
 *
 * NOTE(review): when the READY wait below succeeds (ioc_state == 0) the
 * code falls through into the issue_diag_reset label and still performs
 * a diag reset; a "return 0;" before the label may be missing - confirm
 * against intended behavior.
 */
static int
leapioraid_base_wait_for_iocstate(
	struct LEAPIORAID_ADAPTER *ioc, int timeout)
{
	u32 ioc_state;
	int rc;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (!leapioraid_base_pci_device_is_available(ioc))
		return 0;
	ioc_state = leapioraid_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
			       ioc->name, __func__, ioc_state));
	if (((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) ||
	    (ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
	    LEAPIORAID_IOC_STATE_OPERATIONAL)
		return 0;
	if (ioc_state & LEAPIORAID_DOORBELL_USED) {
		dhsprintk(ioc,
			  pr_info("%s unexpected doorbell active!\n", ioc->name));
		goto issue_diag_reset;
	}
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
		leapioraid_print_fault_code(ioc, ioc_state &
					    LEAPIORAID_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	} else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
		   LEAPIORAID_IOC_STATE_COREDUMP) {
		pr_err("%s %s: Skipping the diag reset here. (ioc_state=0x%x)\n",
		       ioc->name, __func__, ioc_state);
		return -EFAULT;
	}
	ioc_state =
	    leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY,
					     timeout);
	if (ioc_state) {
		pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
		       ioc->name, __func__, ioc_state);
		return -EFAULT;
	}
issue_diag_reset:
	rc = leapioraid_base_diag_reset(ioc);
	return rc;
}

/**
 * leapioraid_base_check_for_fault_and_issue_reset - recover a dead IOC
 * @ioc: per-adapter object
 *
 * If the IOC reports FAULT, or COREDUMP (after waiting for the dump to
 * complete), interrupts are masked and a diag reset is issued.
 *
 * Return: result of the diag reset, or -EFAULT when no reset was issued
 * or the PCI device is unavailable.
 */
int
leapioraid_base_check_for_fault_and_issue_reset(
	struct LEAPIORAID_ADAPTER *ioc)
{
	u32 ioc_state;
	int rc = -EFAULT;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (!leapioraid_base_pci_device_is_available(ioc))
		return rc;
	ioc_state = leapioraid_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
			       ioc->name, __func__, ioc_state));
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
		leapioraid_print_fault_code(ioc, ioc_state &
					    LEAPIORAID_DOORBELL_DATA_MASK);
		leapioraid_base_mask_interrupts(ioc);
		rc = leapioraid_base_diag_reset(ioc);
	} else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
		   LEAPIORAID_IOC_STATE_COREDUMP) {
		leapioraid_base_coredump_info(ioc,
					      ioc_state &
					      LEAPIORAID_DOORBELL_DATA_MASK);
		leapioraid_base_wait_for_coredump_completion(ioc, __func__);
		leapioraid_base_mask_interrupts(ioc);
		rc = leapioraid_base_diag_reset(ioc);
	}
	return rc;
}

/*
 * leapioraid_base_get_ioc_facts - issue the IOC_FACTS handshake and
 * cache the reply (endian-converted) into ioc->facts, deriving several
 * ioc-> feature flags from the reported capabilities.
 * (Function continues past this hunk.)
 */
static int
leapioraid_base_get_ioc_facts(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidIOCFactsReq_t mpi_request;
	struct LeapioraidIOCFactsRep_t mpi_reply;
	struct leapioraid_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	r = leapioraid_base_wait_for_iocstate(ioc, 10);
	if (r) {
		pr_err(
			"%s %s: failed getting to correct state\n", ioc->name,
			__func__);
		return r;
	}
	mpi_reply_sz = sizeof(struct LeapioraidIOCFactsRep_t);
	mpi_request_sz = sizeof(struct LeapioraidIOCFactsReq_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = LEAPIORAID_FUNC_IOC_FACTS;
	r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz,
						     (u32 *) &mpi_request,
						     mpi_reply_sz,
						     (u16 *) &mpi_reply, 5);
	if (r != 0) {
		pr_err("%s %s: handshake failed (r=%d)\n",
		       ioc->name, __func__, r);
		return r;
	}
	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct leapioraid_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->IOCNumber = mpi_reply.IOCNumber;
	pr_info("%s IOC Number : %d\n", ioc->name, facts->IOCNumber);
	ioc->IOCNumber = facts->IOCNumber;
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	/* too few vectors: fall back from combined reply queue mode */
	if (ioc->msix_enable && (facts->MaxMSIxVectors <= 16))
		ioc->combined_reply_queue = 0;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	/* RDPQ arrays are not used when booting via reset_devices (kdump) */
	if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)
	    && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	else
		ioc->rdpq_array_capable = 0;
	if (facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ)
		ioc->atomic_desc_capable = 1;
	else
		ioc->atomic_desc_capable = 0;

	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	facts->IOCMaxChainSegmentSize =
	    le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	facts->MaxInitiators
	    = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit = le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
	/* page size is reported as a power-of-two exponent */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		pr_err(
			"%s CurrentHostPageSize is 0: Setting host page to 4k\n",
			ioc->name);
		ioc->page_size = 1 << 12;
	}
	dinitprintk(ioc,
		    pr_info("%s CurrentHostPageSize(%d)\n",
			    ioc->name, facts->CurrentHostPageSize));
	dinitprintk(ioc,
		    pr_info("%s hba queue depth(%d), max chains per io(%d)\n",
			    ioc->name, facts->RequestCredit, facts->MaxChainDepth));
	dinitprintk(ioc,
		    pr_info("%s request frame size(%d), reply frame size(%d)\n",
			    ioc->name,
			    facts->IOCRequestFrameSize * 4,
			    facts->ReplyFrameSize * 4));
	return 0;
}

/*
 * leapioraid_base_unmap_resources - undo leapioraid_base_map_resources:
 * free IRQs/MSI-X, release the reply-post index table, unmap the chip
 * registers and release/disable the PCI device.
 */
static void
leapioraid_base_unmap_resources(struct LEAPIORAID_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	pr_info("%s %s\n", ioc->name, __func__);
	leapioraid_base_free_irq(ioc);
	leapioraid_base_disable_msix(ioc);
	kfree(ioc->replyPostRegisterIndex);
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	pci_release_selected_regions(ioc->pdev, ioc->bars);
	pci_disable_device(pdev);
	mutex_unlock(&ioc->pci_access_mutex);
}

/**
 * leapioraid_base_map_resources - enable the PCI device, map the first
 * memory BAR as the chip register set, fetch IOC facts, set up MSI-X /
 * poll queues and (in combined-reply-queue mode) the per-queue reply
 * post host index registers.
 *
 * Return: 0 on success, negative errno on failure (resources are
 * unwound via leapioraid_base_unmap_resources).
 */
int
leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0, rc;
	u64 pio_chip = 0;
	phys_addr_t chip_phys = 0;
	struct leapioraid_adapter_reply_queue *reply_q;
	int iopoll_q_count = 0;

	dinitprintk(ioc,
		    pr_info("%s %s\n",
			    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn("%s pci_enable_device_mem: failed\n", ioc->name);
		return -ENODEV;
	}
	if (pci_request_selected_regions(pdev, ioc->bars, ioc->driver_name)) {
		pr_warn("%s pci_request_selected_regions: failed\n", ioc->name);
		r = -ENODEV;
		goto out_fail;
	}

	pci_set_master(pdev);

	if (leapioraid_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn("%s no suitable DMA mask for %s\n",
			ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}
	/* map the first I/O-port BAR and the first memory BAR only */
	for (i = 0, memap_sz = 0, pio_sz = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64) pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
			if (ioc->chip == NULL) {
				pr_err("%s unable to map adapter memory!\n",
				       ioc->name);
				r = -EINVAL;
				goto out_fail;
			}
		}
	}
	leapioraid_base_mask_interrupts(ioc);
	r = leapioraid_base_get_ioc_facts(ioc);
	if (r) {
		/* one recovery attempt: reset then retry the facts fetch */
		rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (leapioraid_base_get_ioc_facts(ioc)))
			goto out_fail;
	}
	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}
	r = leapioraid_base_enable_msix(ioc);
	if (r)
		goto out_fail;
	/* queues at and beyond iopoll_q_start_index are blk-mq poll queues */
	iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
	for (i = 0; i < iopoll_q_count; i++) {
		atomic_set(&ioc->blk_mq_poll_queues[i].busy, 0);
		atomic_set(&ioc->blk_mq_poll_queues[i].pause, 0);
	}
	if (!ioc->is_driver_loading)
		leapioraid_base_init_irqpolls(ioc);
	if (ioc->combined_reply_queue) {
		ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count,
						      sizeof(resource_size_t *),
						      GFP_KERNEL);
		if (!ioc->replyPostRegisterIndex) {
			pr_err("%s allocation for reply Post Register Index failed!!!\n",
			       ioc->name);
			r = -ENOMEM;
			goto out_fail;
		}

		/*
		 * Precompute the MMIO addresses of the per-group reply post
		 * host index registers (0x30C + i*0x10 past the Doorbell).
		 */
		for (i = 0; i < ioc->nc_reply_index_count; i++) {
			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
			    ((u8 *) &ioc->chip->Doorbell
			     + 0x0000030C
			     + (i * 0x10));
		}
	}
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
			pr_info("%s enabled: index: %d\n",
				reply_q->name, reply_q->msix_index);
			continue;
		}
		pr_info("%s %s: IRQ %d\n",
			reply_q->name,
			((ioc->msix_enable) ? "PCI-MSI-X enabled" :
			 "IO-APIC enabled"), pci_irq_vector(ioc->pdev,
							    reply_q->msix_index));
	}
	pr_info("%s iomem(%pap), mapped(0x%p), size(%d)\n",
		ioc->name, &chip_phys, ioc->chip, memap_sz);
	pr_info("%s ioport(0x%016llx), size(%d)\n",
		ioc->name, (unsigned long long)pio_chip, pio_sz);

	pci_save_state(pdev);
	return 0;
out_fail:
	leapioraid_base_unmap_resources(ioc);
	return r;
}

/* Return the request message frame for @smid. */
void *leapioraid_base_get_msg_frame(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->request + (smid * ioc->request_sz));
}

/* Return the sense buffer for @smid (smid is 1-based). */
void *leapioraid_base_get_sense_buffer(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
}

/* 32-bit little-endian DMA address of the sense buffer for @smid. */
__le32
leapioraid_base_get_sense_buffer_dma(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
					     SCSI_SENSE_BUFFERSIZE));
}

/* 64-bit little-endian DMA address of the sense buffer for @smid. */
__le64
leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc,
					u16 smid)
{
	return cpu_to_le64(ioc->sense_dma + ((smid - 1) *
					     SCSI_SENSE_BUFFERSIZE));
}

/* Translate a 32-bit reply physical address into its virtual address. */
void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc,
					  u32 phys_addr)
{
	if (!phys_addr)
		return NULL;
	return ioc->reply + (phys_addr - (u32) ioc->reply_dma);
}

/*
 * leapioraid_base_get_msix_index - pick the MSI-X reply queue index for
 * a request: round-robin when msix_load_balance is set, the blk-mq hw
 * queue (offset by the high-iops queues) for multi-queue SCSI commands,
 * otherwise the per-CPU table entry.
 */
static inline u8
leapioraid_base_get_msix_index(
	struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	if (ioc->msix_load_balance)
		return ioc->reply_queue_count ?
		    leapioraid_base_mod64(atomic64_add_return(1, &ioc->total_io_cnt),
					  ioc->reply_queue_count) : 0;
	if (scmd && ioc->shost->nr_hw_queues > 1) {
		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues;
	}
	return ioc->cpu_msix_table[raw_smp_processor_id()];
}

/* Number of requests currently in flight on @scmd's scsi_device. */
inline unsigned long
leapioraid_base_sdev_nr_inflight_request(struct LEAPIORAID_ADAPTER *ioc,
					 struct scsi_cmnd *scmd)
{
	return scsi_device_busy(scmd->device);
}

/*
 * leapioraid_base_get_high_iops_msix_index - under high per-device load,
 * rotate across the dedicated high-iops reply queues in batches;
 * otherwise defer to the normal index selection.
 */
static inline u8
leapioraid_base_get_high_iops_msix_index(struct LEAPIORAID_ADAPTER *ioc,
					 struct scsi_cmnd *scmd)
{
	if (leapioraid_base_sdev_nr_inflight_request(ioc, scmd) >
	    LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH)
		return
		    leapioraid_base_mod64((atomic64_add_return
					   (1,
					    &ioc->high_iops_outstanding) /
					   LEAPIORAID_HIGH_IOPS_BATCH_COUNT),
					  LEAPIORAID_HIGH_IOPS_REPLY_QUEUES);
	return leapioraid_base_get_msix_index(ioc, scmd);
}

/**
 * leapioraid_base_get_smid - allocate an internal-command SMID
 * @ioc: per-adapter object
 * @cb_idx: callback index recorded in the tracker
 *
 * Return: a SMID from the internal free list, or 0 when exhausted.
 */
u16
leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct leapioraid_request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->internal_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		pr_err("%s %s: smid not available\n",
		       ioc->name, __func__);
		return 0;
	}
	request = list_entry(ioc->internal_free_list.next,
			     struct leapioraid_request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/*
 * leapioraid_base_get_smid_scsiio - derive the SMID for a SCSI command
 * from its blk-mq tag (smid = tag + 1) and record the command in the
 * per-command tracker.  Never fails: the tag space bounds the SMIDs.
 */
u16
leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx,
				struct scsi_cmnd *scmd)
{
	struct leapioraid_scsiio_tracker *request;
	u16 smid;
	u32 tag = scsi_cmd_to_rq(scmd)->tag;	/* initializer overwritten below */
	u32 unique_tag;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	tag = blk_mq_unique_tag_to_tag(unique_tag);
	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
	request = leapioraid_base_scsi_cmd_priv(scmd);
	smid = tag + 1;
	request->cb_idx = cb_idx;
	request->smid = smid;
	request->scmd = scmd;
	return smid;
}

/*
 * leapioraid_base_get_smid_hpr - allocate a high-priority SMID from the
 * hpr free list; returns 0 when none is available.
 */
u16
leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct leapioraid_request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->hpr_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		return 0;
	}
	request = list_entry(ioc->hpr_free_list.next,
			     struct leapioraid_request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/*
 * During host recovery, count down outstanding I/O and wake the reset
 * waiter once the last pending command completes.
 */
static void
leapioraid_base_recovery_check(struct LEAPIORAID_ADAPTER *ioc)
{
	if (ioc->shost_recovery && ioc->pending_io_count) {
		if (ioc->pending_io_count == 1)
			wake_up(&ioc->reset_wq);
		ioc->pending_io_count--;
	}
}

/* Reset a SCSI-IO tracker and its chain-offset bookkeeping. */
void
leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc,
			 struct leapioraid_scsiio_tracker *st)
{
	if (!st)
		return;
	if (WARN_ON(st->smid == 0))
		return;
	st->cb_idx = 0xFF;
	st->direct_io = 0;
	st->scmd = NULL;
	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
}

/**
 * leapioraid_base_free_smid - return a SMID to its pool
 * @ioc: per-adapter object
 * @smid: system message id to release
 *
 * SCSI-IO SMIDs (below hi_priority_smid) clear their tracker and message
 * frame; high-priority and internal SMIDs go back on their free lists.
 */
void
leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct leapioraid_scsiio_tracker *st;
	void *request;

	if (smid < ioc->hi_priority_smid) {
		st = leapioraid_get_st_from_smid(ioc, smid);
		if (!st) {
			leapioraid_base_recovery_check(ioc);
			return;
		}
		request = leapioraid_base_get_msg_frame(ioc, smid);
		memset(request, 0, ioc->request_sz);
		leapioraid_base_clear_st(ioc, st);
		leapioraid_base_recovery_check(ioc);
		/* 0xFFFF marks "no hw queue" for this tag */
		ioc->io_queue_num[smid - 1] = 0xFFFF;
		return;
	}
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
			 &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}

#if defined(writeq) && defined(CONFIG_64BIT)
/* Native 64-bit MMIO write; the lock argument is unused here. */
static inline void
leapioraid_base_writeq(
	__u64 b, void __iomem *addr, spinlock_t *writeq_lock)
{
	writeq(b, addr);
}
#else
/*
 * 32-bit fallback: two writel()s under @writeq_lock so the low/high
 * halves of a request descriptor post cannot interleave between CPUs.
 */
static inline void
leapioraid_base_writeq(
	__u64 b, void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = b;

	spin_lock_irqsave(writeq_lock, flags);
	writel((u32) (data_out), addr);
	writel((u32) (data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#endif

/*
 * leapioraid_base_set_and_get_msix_index - resolve and record the MSI-X
 * index for @smid (continues past this hunk): SCSI-IO SMIDs store the
 * chosen index in their tracker, others use the default selection.
 */
static u8
leapioraid_base_set_and_get_msix_index(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	struct leapioraid_scsiio_tracker *st;

	st = (smid <
	      ioc->hi_priority_smid) ?
	      (leapioraid_get_st_from_smid(ioc,
					   smid))
	    : (NULL);
	if (st == NULL)
		return leapioraid_base_get_msix_index(ioc, NULL);
	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
	return st->msix_io;
}

/*
 * leapioraid_base_put_smid_scsi_io - post a SCSI-IO request descriptor
 * (64-bit form) to the RequestDescriptorPost register.
 */
static void
leapioraid_base_put_smid_scsi_io(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
				 u16 handle)
{
	union LeapioraidReqDescUnion_t descriptor;
	u64 *request = (u64 *) &descriptor;

	descriptor.SCSIIO.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex
	    = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
			       &ioc->scsi_lookup_lock);
}

/* Same as put_smid_scsi_io but with the fast-path request flag. */
static void
leapioraid_base_put_smid_fast_path(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
				   u16 handle)
{
	union LeapioraidReqDescUnion_t descriptor;
	u64 *request = (u64 *) &descriptor;

	descriptor.SCSIIO.RequestFlags =
	    LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex
	    = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
			       &ioc->scsi_lookup_lock);
}

/*
 * Post a high-priority request descriptor; @msix_task selects the reply
 * queue explicitly instead of the per-command selection.
 */
static void
leapioraid_base_put_smid_hi_priority(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
				     u16 msix_task)
{
	union LeapioraidReqDescUnion_t descriptor;
	u64 *request;

	request = (u64 *) &descriptor;
	descriptor.HighPriority.RequestFlags =
	    LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.HighPriority.MSIxIndex = msix_task;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
			       &ioc->scsi_lookup_lock);
}

/* Post a default-type request descriptor (internal/driver commands). */
static void
leapioraid_base_put_smid_default(struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	union LeapioraidReqDescUnion_t descriptor;
	u64 *request;

	request = (u64 *) &descriptor;
	descriptor.Default.RequestFlags =
	    LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex
	    = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
			       &ioc->scsi_lookup_lock);
}

/*
 * Atomic (32-bit) descriptor posters: a single writel() replaces the
 * 64-bit post.  NOTE(review): cpu_to_le32() on the argument of writel()
 * looks like a double byte-swap on big-endian hosts, since writel()
 * already writes little-endian - verify on BE platforms.
 */
static void
leapioraid_base_put_smid_scsi_io_atomic(struct LEAPIORAID_ADAPTER *ioc,
					u16 smid, u16 handle)
{
	struct LeapioraidAtomicReqDesc_t descriptor;
	u32 *request = (u32 *) &descriptor;

	descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);
	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/* Atomic fast-path SCSI-IO post; see endianness note above. */
static void
leapioraid_base_put_smid_fast_path_atomic(struct LEAPIORAID_ADAPTER *ioc,
					  u16 smid, u16 handle)
{
	struct LeapioraidAtomicReqDesc_t descriptor;
	u32 *request = (u32 *) &descriptor;

	descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);
	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/* Atomic high-priority post with caller-chosen reply queue. */
static void
leapioraid_base_put_smid_hi_priority_atomic(struct LEAPIORAID_ADAPTER *ioc,
					    u16 smid, u16 msix_task)
{
	struct LeapioraidAtomicReqDesc_t descriptor;
	u32 *request = (u32 *) &descriptor;

	descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.MSIxIndex = msix_task;
	descriptor.SMID = cpu_to_le16(smid);
	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

static void
/* Atomic default-type post (see endianness note on the atomic posters). */
leapioraid_base_put_smid_default_atomic(struct LEAPIORAID_ADAPTER *ioc,
					u16 smid)
{
	struct LeapioraidAtomicReqDesc_t descriptor;
	u32 *request = (u32 *)(&descriptor);

	descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);
	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * leapioraid_base_display_fwpkg_version - upload the FW image header via
 * a FW_UPLOAD internal command and log the package version.
 *
 * Uses the shared base_cmds slot; a timeout triggers a recovery reset.
 * NOTE(review): ImageSize is assigned without cpu_to_le32() while other
 * multi-byte fields in this file are converted - verify on big-endian.
 *
 * Return: 0 on success, -EAGAIN/-ENOMEM/-EFAULT on failure.
 */
static int
leapioraid_base_display_fwpkg_version(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidFWImgHeader_t *fw_img_hdr;
	struct LeapioraidComptImgHeader_t *cmp_img_hdr;
	struct LeapioraidFWUploadReq_t *mpi_request;
	struct LeapioraidFWUploadRep_t mpi_reply;
	int r = 0, issue_diag_reset = 0;
	u32 package_version = 0;
	void *fwpkg_data = NULL;
	dma_addr_t fwpkg_data_dma;
	u16 smid, ioc_status;
	size_t data_length;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
		pr_err("%s %s: internal command already in use\n", ioc->name,
		       __func__);
		return -EAGAIN;
	}
	data_length = sizeof(struct LeapioraidFWImgHeader_t);
	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
					&fwpkg_data_dma, GFP_ATOMIC);
	if (!fwpkg_data)
		return -ENOMEM;

	smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		r = -EAGAIN;
		goto out;
	}
	ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidFWUploadReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_FW_UPLOAD;
	mpi_request->ImageType = 0x01;
	mpi_request->ImageSize = data_length;
	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
		      data_length);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* firmware gets up to 15 seconds to complete the upload */
	wait_for_completion_timeout(&ioc->base_cmds.done, 15 * HZ);
	dinitprintk(ioc, pr_info("%s %s: complete\n",
				 ioc->name, __func__));
	if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
		pr_err("%s %s: timeout\n",
		       ioc->name, __func__);
		leapioraid_debug_dump_mf(mpi_request,
					 sizeof(struct LeapioraidFWUploadReq_t) / 4);
		issue_diag_reset = 1;
	} else {
		memset(&mpi_reply, 0, sizeof(struct LeapioraidFWUploadRep_t));
		if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
			memcpy(&mpi_reply, ioc->base_cmds.reply,
			       sizeof(struct LeapioraidFWUploadRep_t));
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    LEAPIORAID_IOCSTATUS_MASK;
			if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) {
				fw_img_hdr =
				    (struct LeapioraidFWImgHeader_t *) fwpkg_data;
				/* 0xEB000042: component-image signature */
				if (le32_to_cpu(fw_img_hdr->Signature) ==
				    0xEB000042) {
					cmp_img_hdr =
					    (struct LeapioraidComptImgHeader_t
					     *) (fwpkg_data);
					package_version =
					    le32_to_cpu(cmp_img_hdr->ApplicationSpecific);
				} else
					package_version =
					    le32_to_cpu(fw_img_hdr->PackageVersion.Word);
				if (package_version)
					pr_err(
						"%s FW Package Version(%02d.%02d.%02d.%02d)\n",
						ioc->name,
						((package_version) & 0xFF000000)
						>> 24,
						((package_version) & 0x00FF0000)
						>> 16,
						((package_version) & 0x0000FF00)
						>> 8,
						(package_version) & 0x000000FF);
			} else {
				leapioraid_debug_dump_mf(&mpi_reply,
							 sizeof(struct LeapioraidFWUploadRep_t) /
							 4);
			}
		}
	}
	ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
out:
	if (fwpkg_data)
		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
				  fwpkg_data_dma);
	if (issue_diag_reset) {
		/* no recovery before the first port-enable completes */
		if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (leapioraid_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}

/*
 * leapioraid_base_display_ioc_capabilities - log chip name, firmware
 * version and capability flags (continues past this hunk).
 */
static void
leapioraid_base_display_ioc_capabilities(struct LEAPIORAID_ADAPTER *ioc)
{
	int i = 0;
	char desc[17] = { 0 };
	u8 revision;
	u32 iounit_pg1_flags;

	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
	strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
	pr_info("%s %s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
		ioc->name, desc,
		(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
		(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
		(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
		ioc->facts.FWVersion.Word & 0x000000FF, revision);
	/*
	 * NOTE(review): the prints below are fragments of one logical
	 * line but lack KERN_CONT, so each pr_info() starts a new log
	 * line in recent kernels - verify the intended output format.
	 */
	pr_info("%s Protocol=(", ioc->name);
	if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_info("Initiator");
		i++;
	}
	if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_info("%sTarget", i ? "," : "");
		i++;
	}
	i = 0;
	pr_info("), ");
	pr_info("Capabilities=(");
	if ((!ioc->warpdrive_msg) && (ioc->facts.IOCCapabilities &
				      LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) {
		pr_info("Raid");
		i++;
	}
	if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR) {
		pr_info("%sTLR", i ? "," : "");
		i++;
	}
	if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST) {
		pr_info("%sMulticast", i ? "," : "");
		i++;
	}
	if (ioc->facts.IOCCapabilities &
	    LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
		pr_info("%sBIDI Target", i ? "," : "");
		i++;
	}
	if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_EEDP) {
		pr_info("%sEEDP", i ? "," : "");
		i++;
	}
	if (ioc->facts.IOCCapabilities &
	    LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
		pr_info("%sTask Set Full", i ? "," : "");
		i++;
	}
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if (!(iounit_pg1_flags & LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
		pr_info("%sNCQ", i ? "," : "");
		i++;
	}
	pr_info(")\n");
}

/**
 * leapioraid_base_update_ioc_page1_inlinewith_perf_mode - program the
 * interrupt-coalescing fields of IOC Page 1 according to the module's
 * perf_mode setting (balanced / latency / iops).
 *
 * Return: 0 on success or the config-page access error code.
 */
static int
leapioraid_base_update_ioc_page1_inlinewith_perf_mode(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidIOCP1_t ioc_pg1;
	struct LeapioraidCfgRep_t mpi_reply;
	int rc;

	rc = leapioraid_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
	if (rc)
		return rc;
	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(struct LeapioraidIOCP1_t));
	switch (perf_mode) {
	case LEAPIORAID_PERF_MODE_DEFAULT:
	case LEAPIORAID_PERF_MODE_BALANCED:
		if (ioc->high_iops_queues) {
			pr_err(
				"%s Enable int coalescing only for first %d reply queues\n",
				ioc->name, LEAPIORAID_HIGH_IOPS_REPLY_QUEUES);
			/*
			 * 0x80000000 enables the per-queue coalescing bitmap;
			 * note precedence: this is 1 << (QUEUES / 8), i.e. a
			 * mask over queue groups of 8 - confirm intent.
			 */
			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
							      ((1 <<
								LEAPIORAID_HIGH_IOPS_REPLY_QUEUES
								/ 8) - 1));
			rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
							   &ioc_pg1);
			if (rc)
				return rc;
			pr_err("%s performance mode: balanced\n", ioc->name);
			return 0;
		}
		fallthrough;
	case LEAPIORAID_PERF_MODE_LATENCY:
		/* fixed 10 usec (0xa) coalescing timeout for latency mode */
		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
		ioc_pg1.Flags |= cpu_to_le32(0x00000001);
		ioc_pg1.ProductSpecific = 0;
		rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		pr_err("%s performance mode: latency\n", ioc->name);
		break;
	case LEAPIORAID_PERF_MODE_IOPS:
		pr_err(
			"%s performance mode: iops with coalescing timeout: 0x%x\n",
			ioc->name, le32_to_cpu(ioc_pg1.CoalescingTimeout));
		ioc_pg1.Flags |= cpu_to_le32(0x00000001);
		ioc_pg1.ProductSpecific = 0;
		rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		break;
	}
	return 0;
}

/*
 * leapioraid_base_assign_fw_reported_qd - read per-transport queue
 * depths from SAS IO Unit Page 1, falling back to driver defaults when
 * the page reports zero (continues past this hunk).
 */
static int
leapioraid_base_assign_fw_reported_qd(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidCfgRep_t mpi_reply;
	struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL;
	int sz;
	int rc = 0;

	ioc->max_wideport_qd = LEAPIORAID_SAS_QUEUE_DEPTH;
	ioc->max_narrowport_qd = LEAPIORAID_SAS_QUEUE_DEPTH;
	ioc->max_sata_qd = LEAPIORAID_SATA_QUEUE_DEPTH;

	sz =
	    /* header only - the PhyData tail is not needed here */
	    offsetof(struct LeapioraidSasIOUnitP1_t, PhyData);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		pr_err("%s failure at %s:%d/%s()!\n",
		       ioc->name, __FILE__, __LINE__, __func__);
		return rc;
	}
	rc = leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply,
						  sas_iounit_pg1, sz);
	if (rc) {
		pr_err("%s failure at %s:%d/%s()!\n",
		       ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->max_wideport_qd =
	    (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
	    le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
	    LEAPIORAID_SAS_QUEUE_DEPTH;
	ioc->max_narrowport_qd =
	    (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
	    le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
	    LEAPIORAID_SAS_QUEUE_DEPTH;
	ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
	    sas_iounit_pg1->SATAMaxQDepth : LEAPIORAID_SATA_QUEUE_DEPTH;
out:
	dinitprintk(ioc, pr_err(
		"%s MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x\n",
		ioc->name, ioc->max_wideport_qd,
		ioc->max_narrowport_qd, ioc->max_sata_qd));
	kfree(sas_iounit_pg1);
	return rc;
}

/*
 * leapioraid_base_static_config_pages - fetch and cache the static
 * config pages (manufacturing, BIOS, IOC, IO unit), derive the FW time
 * sync interval, reconcile task-set-full handling with the reported
 * capabilities and apply the perf-mode coalescing settings.
 */
static int
leapioraid_base_static_config_pages(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidCfgRep_t mpi_reply;
	u32 iounit_pg1_flags;
	int rc;

	rc = leapioraid_config_get_manufacturing_pg0(ioc, &mpi_reply,
						     &ioc->manu_pg0);
	if (rc)
		return rc;
	if (ioc->ir_firmware) {
		rc = leapioraid_config_get_manufacturing_pg10(ioc, &mpi_reply,
							      &ioc->manu_pg10);
		if (rc)
			return rc;
	}
	rc = leapioraid_config_get_manufacturing_pg11(ioc, &mpi_reply,
						      &ioc->manu_pg11);
	if (rc)
		return rc;

	/* low 7 bits: interval count; bit 0x80 selects hours vs minutes */
	ioc->time_sync_interval =
	    ioc->manu_pg11.TimeSyncInterval & 0x7F;
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & 0x80)
			ioc->time_sync_interval =
			    ioc->time_sync_interval * 3600;
		else
			ioc->time_sync_interval =
			    ioc->time_sync_interval * 60;
		dinitprintk(ioc, pr_info(
			"%s Driver-FW TimeSync interval is %d seconds.\n\t\t"
			"ManuPg11 TimeSync Unit is in %s's",
			ioc->name,
			ioc->time_sync_interval,
			((ioc->manu_pg11.TimeSyncInterval & 0x80)
			 ? "Hour" : "Minute")));
	}
	rc = leapioraid_base_assign_fw_reported_qd(ioc);
	if (rc)
		return rc;
	rc = leapioraid_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	if (rc)
		return rc;
	rc = leapioraid_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	if (rc)
		return rc;
	rc = leapioraid_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg0(ioc, &mpi_reply,
					      &ioc->iounit_pg0);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg1(ioc, &mpi_reply,
					      &ioc->iounit_pg1);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg8(ioc, &mpi_reply,
					      &ioc->iounit_pg8);
	if (rc)
		return rc;
	leapioraid_base_display_ioc_capabilities(ioc);
	/* enable task-set-full handling only when firmware supports it */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	     LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	rc = leapioraid_config_set_iounit_pg1(ioc, &mpi_reply,
					      &ioc->iounit_pg1);
	if (rc)
		return rc;
	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;

	rc = leapioraid_base_update_ioc_page1_inlinewith_perf_mode(ioc);
	if (rc)
		return rc;

	return 0;
}

/* Free every cached enclosure node on the adapter's enclosure list. */
void
leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
				 enclosure_dev_next, &ioc->enclosure_list,
				 list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * leapioraid_base_release_memory_pools - free all DMA pools and lookup
 * tables allocated during adapter bring-up: request frames, sense
 * buffers, reply frames/free/post queues, config page, SMID trackers
 * and per-command chain buffers.
 */
static void
leapioraid_base_release_memory_pools(struct LEAPIORAID_ADAPTER *ioc)
{
	int i, j;
	int dma_alloc_count = 0;
	struct leapioraid_chain_tracker *ct;
	/* without RDPQ arrays all queues share one reply-post allocation */
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	dexitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
				  ioc->request, ioc->request_dma);
		dexitprintk(ioc,
			    pr_info("%s request_pool(0x%p): free\n",
				    ioc->name, ioc->request));
		ioc->request = NULL;
	}
	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc, pr_info("%s sense_pool(0x%p): free\n",
					 ioc->name, ioc->sense));
		ioc->sense = NULL;
	}
	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc, pr_info("%s reply_pool(0x%p): free\n",
					 ioc->name, ioc->reply));
		ioc->reply = NULL;
	}
	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
			      ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc, pr_info("%s reply_free_pool(0x%p): free\n",
					 ioc->name, ioc->reply_free));
		ioc->reply_free = NULL;
	}
	if (ioc->reply_post) {
		/* one chunk allocation covers up to MAX_INDEX_IN_ONE_CHUNK queues */
		dma_alloc_count = DIV_ROUND_UP(count,
					       LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		for (i = 0; i < count; i++) {
			if (i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
			    && dma_alloc_count) {
				if (ioc->reply_post[i].reply_post_free) {
					dma_pool_free(ioc->reply_post_free_dma_pool,
						      ioc->reply_post[i].reply_post_free,
						      ioc->reply_post[i].reply_post_free_dma);
					pr_err(
						"%s reply_post_free_pool(0x%p): free\n",
						ioc->name,
						ioc->reply_post[i].reply_post_free);
					ioc->reply_post[i].reply_post_free =
					    NULL;
				}
				--dma_alloc_count;
			}
		}
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		if (ioc->reply_post_free_array && ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
				      ioc->reply_post_free_array,
				      ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		/* dma_pool_destroy(NULL) is a no-op, so safe either way */
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		kfree(ioc->reply_post);
	}
	if (ioc->config_page) {
		dexitprintk(ioc, pr_err(
			"%s config_page(0x%p): free\n", ioc->name,
			ioc->config_page));
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
				  ioc->config_page, ioc->config_page_dma);
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		/* chains below chains_per_prp_buffer live in the PRP pages */
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			     j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
						      ct->chain_buffer,
						      ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}
	kfree(ioc->io_queue_num);
	ioc->io_queue_num = NULL;
}

/*
 * leapioraid_check_same_4gb_region - return 1 when [start, start+sz)
 * does not cross a 4 GB boundary (same upper 32 address bits), else 0.
 */
static int
leapioraid_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
	dma_addr_t end_address;

	end_address = start_address + pool_sz - 1;
	if (upper_32_bits(start_address) == upper_32_bits(end_address))
		return 1;
	else
		return 0;
}

/*
 * Shrink the HBA queue depth by 64 for allocation retries, refusing to
 * go below the internal + reserved SCSI-IO command floor.
 */
static inline int
leapioraid_base_reduce_hba_queue_depth(struct LEAPIORAID_ADAPTER *ioc)
{
	int reduce_sz = 64;

	if ((ioc->hba_queue_depth - reduce_sz) >
	    (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
		ioc->hba_queue_depth -= reduce_sz;
		return 0;
	} else
		return -ENOMEM;
}

/*
 * leapioraid_base_allocate_reply_post_free_array - create and allocate
 * the reply-post free array pool (function continues past this hunk).
 */
static int
leapioraid_base_allocate_reply_post_free_array(struct LEAPIORAID_ADAPTER *ioc,
					       int reply_post_free_array_sz)
{
	ioc->reply_post_free_array_dma_pool =
	    dma_pool_create("reply_post_free_array pool",
			    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
	if (!ioc->reply_post_free_array_dma_pool) {
		dinitprintk(ioc,
			    pr_err
			    ("reply_post_free_array pool: dma_pool_create failed\n"));
		return -ENOMEM;
	}
	ioc->reply_post_free_array =
	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
			   GFP_KERNEL,
&ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) { + dinitprintk(ioc, + pr_err + ("reply_post_free_array pool: dma_pool_alloc failed\n")); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_post_free_array_dma, + reply_post_free_array_sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + return 0; +} + +static int +base_alloc_rdpq_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0; + u32 dma_alloc_count = 0; + int reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(struct LeapioraidDefaultRepDesc_t); + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + + ioc->reply_post = + kcalloc(count, sizeof(struct leapioraid_reply_post_struct), GFP_KERNEL); + if (!ioc->reply_post) { + pr_err("%s reply_post_free pool: kcalloc failed\n", ioc->name); + return -ENOMEM; + } + dma_alloc_count = DIV_ROUND_UP( + count, LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err("reply_post_free pool: dma_pool_create failed\n"); + return -ENOMEM; + } + for (i = 0; i < count; i++) { + if ((i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err("reply_post_free pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region + (ioc->reply_post[i].reply_post_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "%s bad Replypost free pool(0x%p) dma = (0x%llx)\n", + ioc->name, + ioc->reply_post[i].reply_post_free, + (unsigned long long) + 
ioc->reply_post[i].reply_post_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + dma_alloc_count--; + } else { + ioc->reply_post[i].reply_post_free = + (union LeapioraidRepDescUnion_t *) + ((long)ioc->reply_post[i - 1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = (dma_addr_t) + (ioc->reply_post[i - 1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +static int +leapioraid_base_allocate_chain_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0, j = 0; + struct leapioraid_chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) { + pr_err("%s chain_dma_pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, + &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!leapioraid_check_same_4gb_region + (ctr->chain_buffer_dma, ioc->chain_segment_sz)) { + pr_err( + "%s buffers not in same 4G! 
buff=(0x%p) dma=(0x%llx)\n", + ioc->name, + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + } + } + dinitprintk(ioc, pr_info( + "%s chain_lookup depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->scsiio_depth, + ioc->chain_segment_sz, + ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz)) / 1024)); + return 0; +} + +static int +leapioraid_base_allocate_sense_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) { + pr_err("%s sense pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) { + pr_err("%s sense pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->sense_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Sense Pool! 
sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, + (unsigned long long)ioc->sense_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + pr_err( + "%s sense pool(0x%p) - dma(0x%llx): depth(%d),\n\t\t" + "element_size(%d), pool_size (%d kB)\n", + ioc->name, + ioc->sense, + (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz / 1024); + return 0; +} + +static int +leapioraid_base_allocate_reply_free_dma_pool(struct LEAPIORAID_ADAPTER *ioc, + int sz) +{ + ioc->reply_free_dma_pool = + dma_pool_create("reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + pr_err("%s reply_free pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) { + pr_err("%s reply_free pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! 
Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, pr_info( + "%s reply_free pool(0x%p): depth(%d),\n\t\t" + "element_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->reply_free, + ioc->reply_free_queue_depth, 4, + sz / 1024)); + dinitprintk(ioc, + pr_info("%s reply_free_dma (0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +static int +leapioraid_base_allocate_reply_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) { + pr_err("%s reply pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + pr_err("%s reply pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Pool! 
Reply (0x%p) Reply dma = (0x%llx)\n", + ioc->reply, + (unsigned long long)ioc->reply_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32) (ioc->reply_dma); + ioc->reply_dma_max_address = (u32) (ioc->reply_dma) + sz; + pr_err( + "%s reply pool(0x%p) - dma(0x%llx): depth(%d)\n\t\t" + "frame_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->reply, + (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, + ioc->reply_sz, + sz / 1024); + return 0; +} + +static int +leapioraid_base_allocate_memory_pools(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz, rc = 0; + u32 retry_sz; + u32 rdpq_sz = 0, sense_sz = 0, reply_post_free_array_sz = 0; + u16 max_request_credit; + unsigned short sg_tablesize; + u16 sge_size; + int i = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + retry_sz = 0; + facts = &ioc->facts; + sg_tablesize = LEAPIORAID_SG_DEPTH; + if (reset_devices) + sg_tablesize = min_t(unsigned short, sg_tablesize, + LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS); + if (sg_tablesize < LEAPIORAID_MIN_PHYS_SEGMENTS) + sg_tablesize = LEAPIORAID_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > LEAPIORAID_MAX_PHYS_SEGMENTS) { + sg_tablesize = min_t(unsigned short, sg_tablesize, + LEAPIORAID_MAX_SG_SEGMENTS); + pr_warn( + "%s sg_tablesize(%u) is bigger than kernel defined %s(%u)\n", + ioc->name, + sg_tablesize, LEAPIORAID_MAX_PHYS_SEGMENTS_STRING, + LEAPIORAID_MAX_PHYS_SEGMENTS); + } + ioc->shost->sg_tablesize = sg_tablesize; + ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), + (facts->RequestCredit / 4)); + if (ioc->internal_depth < LEAPIORAID_INTERNAL_CMDS_COUNT) { + if (facts->RequestCredit <= (LEAPIORAID_INTERNAL_CMDS_COUNT + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + pr_err( + "%s RequestCredits not enough, it has %d credits\n", + ioc->name, + facts->RequestCredit); + return -ENOMEM; + 
} + ioc->internal_depth = 10; + } + ioc->hi_priority_depth = ioc->internal_depth - (5); + if (reset_devices) + max_request_credit = min_t(u16, facts->RequestCredit, + (LEAPIORAID_KDUMP_SCSI_IO_DEPTH + + ioc->internal_depth)); + else + max_request_credit = min_t(u16, facts->RequestCredit, + LEAPIORAID_MAX_HBA_QUEUE_DEPTH); +retry: + ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; + ioc->request_sz = facts->IOCRequestFrameSize * 4; + ioc->reply_sz = facts->ReplyFrameSize * 4; + if (facts->IOCMaxChainSegmentSize) + ioc->chain_segment_sz = + facts->IOCMaxChainSegmentSize * LEAPIORAID_MAX_CHAIN_ELEMT_SZ; + else + ioc->chain_segment_sz = + LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS * LEAPIORAID_MAX_CHAIN_ELEMT_SZ; + sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); +retry_allocation: + total_sz = 0; + max_sge_elements = + ioc->request_sz - + ((sizeof(struct LeapioraidSCSIIOReq_t) - + sizeof(union LEAPIORAID_IEEE_SGE_IO_UNION)) + 2 * sge_size); + ioc->max_sges_in_main_message = max_sge_elements / sge_size; + max_sge_elements = ioc->chain_segment_sz - sge_size; + ioc->max_sges_in_chain_message = max_sge_elements / sge_size; + chains_needed_per_io = ((ioc->shost->sg_tablesize - + ioc->max_sges_in_main_message) / + ioc->max_sges_in_chain_message) + + 1; + if (chains_needed_per_io > facts->MaxChainDepth) { + chains_needed_per_io = facts->MaxChainDepth; + ioc->shost->sg_tablesize = min_t(u16, + ioc->max_sges_in_main_message + + (ioc->max_sges_in_chain_message * + chains_needed_per_io), + ioc->shost->sg_tablesize); + } + ioc->chains_needed_per_io = chains_needed_per_io; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1; + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += + 16 - (ioc->reply_post_queue_depth % 16); + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = + 
facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + } + pr_info( + "%s scatter gather: sge_in_main_msg(%d),\n\t\t" + "sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n", + ioc->name, + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + ioc->shost->can_queue = + ioc->scsiio_depth - LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT; + dinitprintk(ioc, pr_info("%s scsi host: can_queue depth (%d)\n", ioc->name, + ioc->shost->can_queue)); + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + sz += (ioc->hi_priority_depth * ioc->request_sz); + sz += (ioc->internal_depth * ioc->request_sz); + ioc->request_dma_sz = sz; + ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, + &ioc->request_dma, GFP_KERNEL); + if (!ioc->request) { + if (ioc->scsiio_depth < LEAPIORAID_SAS_QUEUE_DEPTH) { + rc = -ENOMEM; + goto out; + } + retry_sz = 64; + if ((ioc->hba_queue_depth - retry_sz) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= retry_sz; + goto retry_allocation; + } else { + rc = -ENOMEM; + goto out; + } + } + memset(ioc->request, 0, sz); + if (retry_sz) + pr_err( + "%s request pool: dma_alloc_consistent succeed:\n\t\t" + "hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", + ioc->name, + ioc->hba_queue_depth, + ioc->chains_needed_per_io, + ioc->request_sz, + sz / 1024); + ioc->hi_priority = + ioc->request + ((ioc->scsiio_depth + 1) * ioc->request_sz); + ioc->hi_priority_dma = + ioc->request_dma + ((ioc->scsiio_depth + 1) * ioc->request_sz); + ioc->internal = + ioc->hi_priority + (ioc->hi_priority_depth * ioc->request_sz); + ioc->internal_dma = + ioc->hi_priority_dma + (ioc->hi_priority_depth * 
ioc->request_sz); + pr_info( + "%s request pool(0x%p) - dma(0x%llx):\n\t\t" + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->request, + (unsigned long long)ioc->request_dma, + ioc->hba_queue_depth, + ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); + total_sz += sz; + ioc->io_queue_num = kcalloc(ioc->scsiio_depth, sizeof(u16), GFP_KERNEL); + if (!ioc->io_queue_num) { + rc = -ENOMEM; + goto out; + } + dinitprintk(ioc, pr_info("%s scsiio(0x%p): depth(%d)\n", + ioc->name, ioc->request, ioc->scsiio_depth)); + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct leapioraid_request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + rc = -ENOMEM; + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, pr_info( + "%s hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->hi_priority, ioc->hi_priority_depth, + ioc->hi_priority_smid)); + ioc->internal_lookup = + kcalloc(ioc->internal_depth, sizeof(struct leapioraid_request_tracker), + GFP_KERNEL); + if (!ioc->internal_lookup) { + pr_err("%s internal_lookup: kcalloc failed\n", + ioc->name); + rc = -ENOMEM; + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, pr_info( + "%s internal(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->internal, ioc->internal_depth, + ioc->internal_smid)); + sz = ioc->scsiio_depth * sizeof(struct leapioraid_chain_lookup); + ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup) { + if ((max_request_credit - 64) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry; + } else { + pr_err( + "%s chain_lookup: __get_free_pages failed\n", + ioc->name); + rc = -ENOMEM; + goto out; + } + } + sz = ioc->chains_needed_per_io * sizeof(struct leapioraid_chain_tracker); + for (i = 0; i < ioc->scsiio_depth; i++) { + 
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup[i].chains_per_smid) { + if ((max_request_credit - 64) > + (ioc->internal_depth + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry; + } else { + pr_err("%s chain_lookup: kzalloc failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + } + } + ioc->chains_per_prp_buffer = 0; + rc = leapioraid_base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) { + if (ioc->use_32bit_dma && ioc->dma_mask > 32) + goto try_32bit_dma; + else { + if ((max_request_credit - 64) > + (ioc->internal_depth + + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + leapioraid_base_release_memory_pools(ioc); + goto retry_allocation; + } else { + pr_err("%s chain_lookup: dma_pool_alloc failed\n", ioc->name); + return -ENOMEM; + } + } + } + total_sz += ioc->chain_segment_sz * + ((ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->scsiio_depth); + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + rc = leapioraid_base_allocate_sense_dma_pool(ioc, sense_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + rc = leapioraid_base_allocate_reply_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + sz = ioc->reply_free_queue_depth * 4; + rc = leapioraid_base_allocate_reply_free_dma_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(struct LeapioraidDefaultRepDesc_t); + rdpq_sz = reply_post_free_sz * LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK; + if ((leapioraid_base_is_controller_msix_enabled(ioc) + && !ioc->rdpq_array_enable) + 
|| (ioc->reply_queue_count < LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK)) + rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; + rc = base_alloc_rdpq_dma_pool(ioc, rdpq_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + else { + if (ioc->rdpq_array_enable && rc == 0) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(struct LeapioraidIOCInitRDPQArrayEntry); + rc = leapioraid_base_allocate_reply_post_free_array( + ioc, reply_post_free_array_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + } + } + total_sz += rdpq_sz; + ioc->config_page_sz = 512; + ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, + ioc->config_page_sz, + &ioc->config_page_dma, + GFP_KERNEL); + if (!ioc->config_page) { + pr_err("%s config page: dma_pool_alloc failed\n", ioc->name); + rc = -ENOMEM; + goto out; + } + pr_err("%s config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->name, ioc->config_page, + (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); + total_sz += ioc->config_page_sz; + pr_info("%s Allocated physical memory: size(%d kB)\n", + ioc->name, total_sz / 1024); + pr_info( + "%s Current IOC Queue Depth(%d), Max Queue Depth(%d)\n", + ioc->name, + ioc->shost->can_queue, + facts->RequestCredit); + return 0; +try_32bit_dma: + leapioraid_base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + if (leapioraid_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (leapioraid_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; +out: + return rc; +} + +static void +leapioraid_base_flush_ios_and_panic( + struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + ioc->adapter_over_temp = 1; + leapioraid_base_stop_smart_polling(ioc); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); 
+ leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + leapioraid_print_fault_code(ioc, fault_code); +} + +u32 +leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = ioc->base_readl( + &ioc->chip->Doorbell, LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + sc = s & LEAPIORAID_IOC_STATE_MASK; + if (sc != LEAPIORAID_IOC_STATE_MASK) { + if ((sc == LEAPIORAID_IOC_STATE_FAULT) && + ((s & LEAPIORAID_DOORBELL_DATA_MASK) == + LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) { + leapioraid_base_flush_ios_and_panic(ioc, + s & + LEAPIORAID_DOORBELL_DATA_MASK); + panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", + __func__); + } + } + return cooked ? sc : s; +} + +static int +leapioraid_base_send_ioc_reset( + struct LEAPIORAID_ADAPTER *ioc, u8 reset_type, int timeout) +{ + u32 ioc_state; + int r = 0; + unsigned long flags; + + if (reset_type != LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET) { + pr_err("%s %s: unknown reset_type\n", + ioc->name, __func__); + return -EFAULT; + } + if (!(ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + pr_info("%s sending message unit reset !!\n", + ioc->name); + writel(reset_type << LEAPIORAID_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 15))) + r = -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP + && (ioc->is_driver_loading == 1 + || ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + leapioraid_base_coredump_info(ioc, ioc_state); + leapioraid_base_wait_for_coredump_completion(ioc, __func__); + r = -EFAULT; + goto out; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (r != 0) + goto out; + ioc_state = + leapioraid_base_wait_on_iocstate(ioc, 
LEAPIORAID_IOC_STATE_READY, + timeout); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + r = -EFAULT; + goto out; + } +out: + pr_info("%s message unit reset: %s\n", + ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); + return r; +} + +int +leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count) +{ + int wait_state_count = 0; + u32 ioc_state; + + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + if (ioc->is_driver_loading) + return -ETIME; + if (wait_state_count++ == wait_count) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + return -EFAULT; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info("%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + return 0; +} + +int +leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t *mpi_reply, + struct LeapioraidSasIoUnitControlReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = 
leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + if (mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof + (struct LeapioraidSasIoUnitControlReq_t) + / 4, issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSasIoUnitControlRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSasIoUnitControlRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +int +leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + 
smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSepReq_t)); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(struct LeapioraidSepReq_t) / 4, + issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSepRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSepRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +static int +leapioraid_base_get_port_facts(struct LEAPIORAID_ADAPTER *ioc, int port) +{ + struct LeapioraidPortFactsReq_t mpi_request; + struct LeapioraidPortFactsRep_t mpi_reply; + struct leapioraid_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mpi_reply_sz = sizeof(struct LeapioraidPortFactsRep_t); + mpi_request_sz = sizeof(struct LeapioraidPortFactsReq_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = LEAPIORAID_FUNC_PORT_FACTS; + mpi_request.PortNumber = port; + r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *) &mpi_request, + mpi_reply_sz, + (u16 *) &mpi_reply, 
5); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct leapioraid_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + return 0; +} + +static int +leapioraid_base_send_ioc_init(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCInitReq_t mpi_request; + struct LeapioraidIOCInitRep_t mpi_reply; + int i, r = 0; + ktime_t current_time; + u16 ioc_status; + u32 reply_post_free_ary_sz; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCInitReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_IOC_INIT; + mpi_request.WhoInit = LEAPIORAID_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(0x0206); + mpi_request.HeaderVersion = cpu_to_le16(0x3A00); + mpi_request.HostPageSize = 12; + if (leapioraid_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz / 4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64) ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64) ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64) ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64) ioc->reply_free_dma); + if (ioc->rdpq_array_enable) { + reply_post_free_ary_sz = ioc->reply_queue_count * + sizeof(struct LeapioraidIOCInitRDPQArrayEntry); + memset(ioc->reply_post_free_array, 0, reply_post_free_ary_sz); + for (i = 0; i < 
ioc->reply_queue_count; i++) + ioc->reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64((u64) ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post[0].reply_post_free_dma); + } + mpi_request.ConfigurationFlags |= 0x0002; + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); + if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) { + __le32 *mfp; + int i; + + mfp = (__le32 *) &mpi_request; + pr_info("%s \toffset:data\n", ioc->name); + for (i = 0; i < sizeof(struct LeapioraidIOCInitReq_t) / 4; i++) + pr_info("%s \t[0x%02x]:%08x\n", + ioc->name, i * 4, le32_to_cpu(mfp[i])); + } + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCInitReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCInitRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + ioc->timestamp_update_count = 0; + return r; +} + +int +leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCLogReq_t mpi_request; + struct LeapioraidIOCLogRep_t mpi_reply; + u16 ioc_status; + u32 r; + + dinitprintk(ioc, + pr_info("%s %s\n", ioc->name, __func__)); + if (ioc->log_buffer == NULL) { + ioc->log_buffer = + dma_alloc_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + &ioc->log_buffer_dma, GFP_KERNEL); + } + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCLogReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_LOG_INIT; + mpi_request.BufAddr = 
ioc->log_buffer_dma; + mpi_request.BufSize = SYS_LOG_BUF_SIZE; + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCLogReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCLogRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + return r; +} + +static int +leapioraid_base_trace_log_exit(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->log_buffer) + dma_free_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + ioc->log_buffer, ioc->log_buffer_dma); + return 0; +} + +u8 +leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_PORT_ENABLE) + return 1; + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_PENDING; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE_ASYNC) { + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_COMPLETE_ASYNC; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + leapioraid_port_enable_complete(ioc); + return 1; + } + + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + complete(&ioc->port_enable_cmds.done); 
	return 1;
}

/*
 * leapioraid_base_send_port_enable - issue PORT_ENABLE synchronously and
 * wait up to 300 seconds for the firmware to finish device discovery.
 *
 * Returns 0 on success, -EAGAIN if the command slot is busy,
 * -ETIME/-EFAULT on timeout or firmware failure.
 */
static int
leapioraid_base_send_port_enable(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidPortEnableReq_t *mpi_request;
	struct LeapioraidPortEnableRep_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	pr_info("%s sending port enable !!\n", ioc->name);
	if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
		pr_err(
			"%s %s: internal command already in use\n", ioc->name,
			__func__);
		return -EAGAIN;
	}
	smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING;
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE;
	init_completion(&ioc->port_enable_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* discovery can take minutes on large topologies */
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300 * HZ);
	if (!(ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
		pr_err("%s %s: timeout\n",
		       ioc->name, __func__);
		leapioraid_debug_dump_mf(mpi_request,
					 sizeof(struct LeapioraidPortEnableReq_t) / 4);
		if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}
	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
	if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
		pr_err(
			"%s %s: failed with (ioc_status=0x%08x)\n", ioc->name,
			__func__, ioc_status);
		r = -EFAULT;
		goto out;
	}
out:
	ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
	pr_info("%s port enable: %s\n", ioc->name, ((r == 0) ?
						    "SUCCESS"
						    :
						    "FAILED"));
	return r;
}

/*
 * leapioraid_port_enable - issue PORT_ENABLE asynchronously during initial
 * scan; completion is reported through leapioraid_port_enable_done().
 *
 * Returns 0 on successful submission, -EAGAIN otherwise.
 */
int
leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidPortEnableReq_t *mpi_request;
	u16 smid;

	pr_info("%s sending port enable !!\n", ioc->name);
	if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
		pr_err(
			"%s %s: internal command already in use\n", ioc->name,
			__func__);
		return -EAGAIN;
	}
	smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->drv_internal_flags |= LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED;
	ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING;
	ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE_ASYNC;
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE;
	ioc->put_smid_default(ioc, smid);
	return 0;
}

/*
 * Decide whether driver load must wait for discovery to finish: required
 * when IR firmware is present, or when BIOS page 2 names a boot device.
 */
static int
leapioraid_base_determine_wait_on_discovery(struct LEAPIORAID_ADAPTER *ioc)
{
	if (ioc->ir_firmware)
		return 1;
	if (!ioc->bios_pg3.BiosVersion)
		return 0;
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	     LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
	    LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	    (ioc->bios_pg2.ReqBootDeviceForm &
	     LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
	    LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	    (ioc->bios_pg2.ReqAltBootDeviceForm &
	     LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
	    LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;
	return 1;
}

/* Clear the mask bit for @event (0..127) so firmware will report it. */
static void
leapioraid_base_unmask_events(struct LEAPIORAID_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;
	desired_event = (1 << (event % 32));
	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 
128)
		ioc->event_masks[3] &= ~desired_event;
}

/*
 * leapioraid_base_event_notification - send EVENT_NOTIFICATION carrying the
 * current event masks; waits up to 30 seconds for completion.
 *
 * Returns 0 on success, -EAGAIN/-EFAULT on failure paths.
 */
static int
leapioraid_base_event_notification(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidEventNotificationReq_t *mpi_request;
	u16 smid;
	int r = 0;
	int i, issue_diag_reset = 0;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
		pr_err(
			"%s %s: internal command already in use\n", ioc->name,
			__func__);
		return -EAGAIN;
	}
	smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidEventNotificationReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0;
	mpi_request->VP_ID = 0;
	for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] = cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30 * HZ);
	if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
		pr_err("%s %s: timeout\n",
		       ioc->name, __func__);
		leapioraid_debug_dump_mf(mpi_request,
					 sizeof(struct LeapioraidEventNotificationReq_t) / 4);
		if (ioc->base_cmds.status & LEAPIORAID_CMD_RESET)
			r = -EFAULT;
		else
			issue_diag_reset = 1;
	} else
		dinitprintk(ioc, pr_info("%s %s: complete\n",
					 ioc->name, __func__));
	ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
	if (issue_diag_reset) {
		/* past first port-enable: let the caller decide on a reset */
		if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (leapioraid_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}

/*
 * leapioraid_base_validate_event_type - unmask (enable) any events that
 * @event_type requests and the driver currently has masked, then push the
 * updated masks to firmware if anything changed.
 */
void
leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc,
				    u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	     LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}
	if (!send_update_to_fw)
		return;
	mutex_lock(&ioc->base_cmds.mutex);
	leapioraid_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}

/*
 * leapioraid_base_make_ioc_ready - drive the IOC to READY state, using a
 * message-unit reset when possible and a diagnostic reset otherwise.
 *
 * Returns 0 on success, -EFAULT or the diag-reset result on failure.
 */
int
leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc,
			       enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (!leapioraid_base_pci_device_is_available(ioc))
		return 0;
	ioc_state = leapioraid_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
			       ioc->name, __func__, ioc_state));
	count = 0;
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_RESET) {
		/* poll up to ~10s for the transition out of RESET */
		while ((ioc_state & LEAPIORAID_IOC_STATE_MASK) !=
		       LEAPIORAID_IOC_STATE_READY) {
			if (count++ == 10) {
				pr_err(
					"%s %s: failed going to ready state (ioc_state=0x%x)\n",
					ioc->name, __func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = leapioraid_base_get_iocstate(ioc, 0);
		}
	}
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY)
		return 0;
	if (ioc_state & LEAPIORAID_DOORBELL_USED) {
		pr_info("%s unexpected doorbell active!\n",
			ioc->name);
		goto issue_diag_reset;
	}
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
		leapioraid_print_fault_code(ioc, ioc_state &
					    LEAPIORAID_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) {
		if (ioc->ioc_coredump_loop != 0xFF) {
			leapioraid_base_coredump_info(ioc, ioc_state & 
LEAPIORAID_DOORBELL_DATA_MASK);
			leapioraid_base_wait_for_coredump_completion(ioc,
								     __func__);
		}
		goto issue_diag_reset;
	}
	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
	    LEAPIORAID_IOC_STATE_OPERATIONAL)
		if (!
		    (leapioraid_base_send_ioc_reset
		     (ioc, LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
		}
issue_diag_reset:
	rc = leapioraid_base_diag_reset(ioc);
	return rc;
}

/*
 * leapioraid_base_make_ioc_operational - rebuild all host-side tracking
 * state (delayed lists, smid trackers, reply free/post queues), send
 * IOC INIT, re-arm the reply queues and re-enable interrupts/events.
 *
 * Returns 0 on success, else a negative error code.
 */
static int
leapioraid_base_make_ioc_operational(struct LEAPIORAID_ADAPTER *ioc)
{
	int r, rc, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct leapioraid_tr_list *delayed_tr, *delayed_tr_next;
	struct leapioraid_sc_list *delayed_sc, *delayed_sc_next;
	struct leapioraid_event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	struct leapioraid_adapter_reply_queue *reply_q;
	union LeapioraidRepDescUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	/* discard everything queued while the IOC was down */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
				 &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
				 &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
				 &ioc->delayed_internal_tm_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}
	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
				 &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}
	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
				 &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}
	/* rebuild the hi-priority and internal smid free lists */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
			      &ioc->hpr_free_list);
	}
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
			      &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	/* repopulate the reply free queue with reply frame addresses */
	for (i = 0, reply_address = (u32) ioc->reply_dma;
	     i < ioc->reply_free_queue_depth; i++, reply_address +=
	     ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
	}
	if (ioc->is_driver_loading)
		leapioraid_base_assign_reply_queues(ioc);
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
			    ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}
		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!leapioraid_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
skip_init_reply_post_free_queue:
	r = leapioraid_base_send_ioc_init(ioc);
	if (r) {
		if (!ioc->is_driver_loading)
			return r;
		rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (leapioraid_base_send_ioc_init(ioc)))
			return r;
	}
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue) {
			for (i = 0; i < ioc->nc_reply_index_count; i++)
				writel((reply_q->msix_index & 7) <<
				       LEAPIORAID_RPHI_MSIX_INDEX_SHIFT,
				       ioc->replyPostRegisterIndex[i]);
		} else 
{
			writel(reply_q->msix_index << LEAPIORAID_RPHI_MSIX_INDEX_SHIFT,
			       &ioc->chip->ReplyPostHostIndex);
		}
		if (!leapioraid_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}
skip_init_reply_post_host_index:
	leapioraid_base_unmask_interrupts(ioc);
	r = leapioraid_base_display_fwpkg_version(ioc);
	if (r)
		return r;
	r = leapioraid_base_static_config_pages(ioc);
	if (r)
		return r;
	r = leapioraid_base_event_notification(ioc);
	if (r)
		return r;
	leapioraid_base_start_hba_unplug_watchdog(ioc);
	if (!ioc->shost_recovery) {
		/* initial load: async scan decides when to enable ports */
		ioc->wait_for_discovery_to_complete =
		    leapioraid_base_determine_wait_on_discovery(ioc);
		return r;
	}
	/* recovery path: re-enable ports synchronously */
	r = leapioraid_base_send_port_enable(ioc);
	if (r)
		return r;
	return r;
}

/*
 * leapioraid_base_free_resources - quiesce the IOC (soft reset) and unmap
 * the PCI resources; safe to call when the chip was never mapped.
 */
void
leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	if (!ioc->chip_phys)
		return;
	leapioraid_base_mask_interrupts(ioc);
	ioc->shost_recovery = 1;
	leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	leapioraid_base_unmap_resources(ioc);
}

/*
 * leapioraid_base_attach - one-time controller bring-up: map resources,
 * read IOC/port facts, install SG/descriptor callbacks, allocate pools and
 * internal command buffers, then make the IOC operational.
 *
 * Returns 0 on success, else a negative error code (all resources freed).
 */
int
leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc)
{
	int r, rc, i;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = 0;
	ioc->dma_mask = 64;
	ioc->base_readl = &leapioraid_base_readl_aero;
	ioc->smp_affinity_enable = smp_affinity_enable;
	r = leapioraid_base_map_resources(ioc);
	if (r)
		goto out_free_resources;
	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = leapioraid_base_get_ioc_facts(ioc);
	if (r) {
		rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (leapioraid_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	/* IEEE SGE builders are the default for this controller family */
	ioc->build_sg_scmd = &leapioraid_base_build_sg_scmd_ieee;
	ioc->build_sg = &leapioraid_base_build_sg_ieee;
	ioc->build_zero_len_sge =
	    &leapioraid_base_build_zero_len_sge_ieee;
	ioc->sge_size_ieee = sizeof(struct LEAPIORAID_IEEE_SGE_SIMPLE64);
	if (ioc->high_iops_queues)
		ioc->get_msix_index_for_smlio =
		    &leapioraid_base_get_high_iops_msix_index;
	else
		ioc->get_msix_index_for_smlio = &leapioraid_base_get_msix_index;

	/* pick atomic vs. non-atomic request descriptor posting */
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default =
		    &leapioraid_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io =
		    &leapioraid_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
		    &leapioraid_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
		    &leapioraid_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &leapioraid_base_put_smid_default;
		ioc->put_smid_scsi_io = &leapioraid_base_put_smid_scsi_io;
		ioc->put_smid_fast_path = &leapioraid_base_put_smid_fast_path;
		ioc->put_smid_hi_priority =
		    &leapioraid_base_put_smid_hi_priority;
	}
	ioc->build_sg_mpi = &leapioraid_base_build_sg;
	ioc->build_zero_len_sge_mpi = &leapioraid_base_build_zero_len_sge;
	r = leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;
	if (ioc->open_pcie_trace) {
		r = leapioraid_base_trace_log_init(ioc);
		if (r) {
			pr_err("log init failed\n");
			goto out_free_resources;
		}
	}
	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
			      sizeof(struct leapioraid_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		r = leapioraid_base_get_port_facts(ioc, i);
		if (r) {
			rc = leapioraid_base_check_for_fault_and_issue_reset
			    (ioc);
			if (rc || (leapioraid_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}
	r = 
leapioraid_base_allocate_memory_pools(ioc); + if (r) + goto out_free_resources; + if (irqpoll_weight > 0) + ioc->thresh_hold = irqpoll_weight; + else + ioc->thresh_hold = ioc->hba_queue_depth / 4; + leapioraid_base_init_irqpolls(ioc); + init_waitqueue_head(&ioc->reset_wq); + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pend_os_device_add_sz++; + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, + GFP_KERNEL); + if (!ioc->pend_os_device_add) + goto out_free_resources; + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; + ioc->device_remove_in_progress = + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + if (!ioc->device_remove_in_progress) + goto out_free_resources; + ioc->tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + ioc->tm_tr_retry = kzalloc(ioc->tm_tr_retry_sz, GFP_KERNEL); + if (!ioc->tm_tr_retry) + goto out_free_resources; + ioc->fwfault_debug = leapioraid_fwfault_debug; + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + ioc->tm_cmds.reply = 
kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || + !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || + !ioc->tm_cmds.reply || !ioc->config_cmds.reply || + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto out_free_resources; + } + for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_SAS_DISCOVERY); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_VOLUME); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_PHYSICAL_DISK); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_OPERATION_STATUS); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_LOG_ENTRY_ADDED); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_TEMP_THRESHOLD); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + r = leapioraid_base_make_ioc_operational(ioc); + if (r == -EAGAIN) + r = leapioraid_base_make_ioc_operational(ioc); + if (r) + goto out_free_resources; + memcpy(&ioc->prev_fw_facts, &ioc->facts, + 
sizeof(struct leapioraid_facts)); + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; + ioc->got_task_abort_from_ioctl = 0; + ioc->got_task_abort_from_sysfs = 0; + return 0; +out_free_resources: + ioc->remove_host = 1; + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + +void +leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->open_pcie_trace) + leapioraid_base_trace_log_exit(ioc); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + leapioraid_free_enclosure_list(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +static void 
/*
 * Abort every internal command that was in flight when a reset hit:
 * mark it RESET, release its smid and wake any waiter.
 */
leapioraid_base_clear_outstanding_leapioraid_commands(struct LEAPIORAID_ADAPTER
						      *ioc)
{
	struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next;
	unsigned long flags;

	if (ioc->transport_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->transport_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->base_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			/* async scan path: fail the scan rather than wake */
			ioc->start_scan_failed =
			    LEAPIORAID_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
		} else
			complete(&ioc->port_enable_cmds.done);
	}
	if (ioc->config_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->config_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHORT_MAX;
		complete(&ioc->config_cmds.done);
	}
	spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags);
	list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next,
				 &ioc->scsih_q_intenal_cmds, list) {
		if ((scsih_qcmd->status) & LEAPIORAID_CMD_PENDING) {
			scsih_qcmd->status |= LEAPIORAID_CMD_RESET;
			leapioraid_base_free_smid(ioc, scsih_qcmd->smid);
		}
	}
	spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags);
}

/*
 * leapioraid_base_reset_handler - fan a reset phase out to the scsih and
 * ctl layers, clearing outstanding internal commands after the reset.
 */
static void
leapioraid_base_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase)
{
	leapioraid_scsihost_reset_handler(ioc, reset_phase);
	leapioraid_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case LEAPIORAID_IOC_PRE_RESET_PHASE:
		dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n",
				       ioc->name, __func__));
		break;
	case LEAPIORAID_IOC_AFTER_RESET_PHASE:
		dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n",
				       ioc->name, __func__));
		leapioraid_base_clear_outstanding_leapioraid_commands(ioc);
		break;
	case LEAPIORAID_IOC_DONE_RESET_PHASE:
		dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n",
				       ioc->name, __func__));
		break;
	}
}

/*
 * leapioraid_wait_for_commands_to_complete - count pending SCSI IO
 * trackers and wait (up to 10s) for them to drain before a reset.
 */
void
leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc)
{
	u32 ioc_state;
	unsigned long flags;
	u16 i;
	struct leapioraid_scsiio_tracker *st;

	ioc->pending_io_count = 0;
	if (!leapioraid_base_pci_device_is_available(ioc)) {
		pr_err("%s %s: pci error recovery reset or pci device unplug occurred\n",
		       ioc->name, __func__);
		return;
	}
	ioc_state = leapioraid_base_get_iocstate(ioc, 0);
	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) !=
	    LEAPIORAID_IOC_STATE_OPERATIONAL)
		return;
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = 1; i <= ioc->scsiio_depth; i++) {
		st = leapioraid_get_st_from_smid(ioc, i);
		if (st && st->smid != 0) {
			if (st->cb_idx != 0xFF)
				ioc->pending_io_count++;
		}
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	if (!ioc->pending_io_count)
		return;
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/*
 * leapioraid_base_check_ioc_facts_changes - after a reset, grow the
 * per-device-handle bitmaps/arrays if the new firmware reports a larger
 * MaxDevHandle, then refresh the cached facts snapshot.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
leapioraid_base_check_ioc_facts_changes(struct LEAPIORAID_ADAPTER *ioc)
{
	u16 pd_handles_sz, tm_tr_retry_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	u8 *tm_tr_retry = NULL;
	struct leapioraid_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;
		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
				      GFP_KERNEL);
		if (!pd_handles) {
			pr_err(
				"%s Unable to allocate the memory for pd_handles of sz: %d\n",
				ioc->name, pd_handles_sz);
			return -ENOMEM;
		}
		/* zero only the newly grown tail of each buffer */
		memset(pd_handles + ioc->pd_handles_sz, 0,
		       (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;
		blocking_handles =
		    krealloc(ioc->blocking_handles, pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			pr_err(
				"%s Unable to allocate the memory for blocking_handles of sz: %d\n",
				ioc->name, pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		       (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;
		pend_os_device_add =
		    krealloc(ioc->pend_os_device_add, pd_handles_sz,
			     GFP_KERNEL);
		if (!pend_os_device_add) {
			pr_err(
				"%s Unable to allocate the memory for pend_os_device_add of sz: %d\n",
				ioc->name, pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		       (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;
		device_remove_in_progress =
		    krealloc(ioc->device_remove_in_progress, pd_handles_sz,
			     GFP_KERNEL);
		if (!device_remove_in_progress) {
			pr_err(
				"%s Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
				ioc->name, pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		       ioc->device_remove_in_progress_sz, 0,
		       (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
		tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8);
		tm_tr_retry = krealloc(ioc->tm_tr_retry, tm_tr_retry_sz,
				       GFP_KERNEL);
		if (!tm_tr_retry) {
			pr_err(
				"%s Unable to allocate the memory for tm_tr_retry of sz: %d\n",
				ioc->name, tm_tr_retry_sz);
			return -ENOMEM;
		}
		memset(tm_tr_retry + ioc->tm_tr_retry_sz, 0,
		       (tm_tr_retry_sz - ioc->tm_tr_retry_sz));
		ioc->tm_tr_retry = tm_tr_retry;
		ioc->tm_tr_retry_sz = tm_tr_retry_sz;
	}
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	       sizeof(struct leapioraid_facts));
	return 0;
}

/*
 * leapioraid_base_hard_reset_handler - serialized full reset sequence:
 * pre-reset notification, IO drain, make-ready, post-reset cleanup,
 * facts re-read and make-operational.  Concurrent callers just wait for
 * the in-flight reset and return its status.
 *
 * Returns 0 on success, else a negative error code.
 */
int
leapioraid_base_hard_reset_handler(
	struct LEAPIORAID_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;

	dtmprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
			       __func__));
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		/* another reset is running: wait for it and reuse its result */
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc,
			  pr_info("%s %s: exit\n", ioc->name,
				  __func__));
		return ioc->ioc_reset_status;
	}
	if (!leapioraid_base_pci_device_is_available(ioc)) {
		pr_err(
			"%s %s: pci error recovery reset or pci device unplug occurred\n",
			ioc->name, __func__);
		if (leapioraid_base_pci_device_is_unplugged(ioc)) {
			leapioraid_base_pause_mq_polling(ioc);
			ioc->schedule_dead_ioc_flush_running_cmds(ioc);
			leapioraid_base_resume_mq_polling(ioc);
		}
		r = 0;
		goto out_unlocked;
	}
	leapioraid_halt_firmware(ioc, 0);
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	leapioraid_base_get_iocstate(ioc, 0);
	leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_PRE_RESET_PHASE);
	leapioraid_wait_for_commands_to_complete(ioc);
	leapioraid_base_mask_interrupts(ioc);
	leapioraid_base_pause_mq_polling(ioc);
	r = leapioraid_base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_AFTER_RESET_PHASE);
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = leapioraid_base_get_ioc_facts(ioc);
	if (r)
		goto out;
	r = leapioraid_base_check_ioc_facts_changes(ioc);
	if (r) {
		pr_err(
			"%s Some of the parameters got changed in this\n\t\t"
			"new firmware image and it requires system reboot\n",
			ioc->name);
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic(
			"%s: Issue occurred with flashing controller firmware.\n\t\t"
			"Please reboot the system and ensure that the correct\n\t\t"
			"firmware version is running\n",
			ioc->name);
	r = leapioraid_base_make_ioc_operational(ioc);
	if (!r)
		leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_DONE_RESET_PHASE);
out:
	pr_info("%s %s: %s\n",
		ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"));
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);
#if defined(DISABLE_RESET_SUPPORT)
	if (r != 0) {
		struct task_struct *p;

		/* reset disabled: hand the dead IOC to a removal thread */
		ioc->remove_host = 1;
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		p = kthread_run(leapioraid_remove_dead_ioc_func, ioc,
				"leapioraid_dead_ioc_%d", ioc->id);
		if (IS_ERR(p))
			pr_err(
				"%s %s: Running leapioraid_dead_ioc thread failed !!!!\n",
				ioc->name, __func__);
		else
			pr_err(
				"%s %s: Running leapioraid_dead_ioc thread success !!!!\n",
				ioc->name, __func__);
	}
#else
	if (r != 0)
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
#endif
	leapioraid_base_resume_mq_polling(ioc);
out_unlocked:
	dtmprintk(ioc, pr_info("%s %s: exit\n", ioc->name,
			       __func__));
	return r;
}

/* scratch descriptor for a config-page DMA buffer */
struct config_request {
	u16 sz;
	void *page;
	dma_addr_t page_dma;
};

/*
 * leapioraid_config_display_some_debug - pretty-print a config request
 * (page type, action, form, smid) and, when supplied, the reply status.
 */
static void
leapioraid_config_display_some_debug(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
				     char *calling_function_name,
				     struct LeapioraidDefaultRep_t *mpi_reply)
{
	struct LeapioraidCfgReq_t *mpi_request;
	char *desc = NULL;

	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Header.PageType & LEAPIORAID_CONFIG_PAGETYPE_MASK) {
	case LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT:
		desc = "io_unit";
		break;
	case LEAPIORAID_CONFIG_PAGETYPE_IOC:
		desc = "ioc";
		break;
	case LEAPIORAID_CONFIG_PAGETYPE_BIOS:
		desc = "bios";
		break;
	case LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME:
		desc = "raid_volume";
		break;
	case 
LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT: + desc = "sas_port"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING: + desc = "ext_manufacturing"; + break; + } + break; + } + if (!desc) + return; + pr_info("%s %s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + ioc->name, calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_err( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +static int +leapioraid_config_alloc_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) + r = -ENOMEM; + } else { + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + ioc->config_vaddr = mem->page; + return r; +} + +static void 
+leapioraid_config_free_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + +u8 +leapioraid_config_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->config_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->config_cmds.status &= ~LEAPIORAID_CMD_PENDING; + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + leapioraid_config_display_some_debug( + ioc, smid, "config_done", mpi_reply); + ioc->config_cmds.smid = USHORT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +static int +leapioraid_config_request( + struct LEAPIORAID_ADAPTER *ioc, struct LeapioraidCfgReq_t *mpi_request, + struct LeapioraidCfgRep_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + struct LeapioraidCfgReq_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + struct config_request mem; + u32 ioc_status = UINT_MAX; + u8 issue_reset; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: config_cmd in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + 
mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = leapioraid_config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS + | mem.sz, mem.page_dma); + memcpy(mem.page, config_page, + min_t(u16, mem.sz, config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_SGLFLAGS + | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } +retry_config: + if (retry_count) { + if (retry_count > 2) { + r = -EFAULT; + goto free_mem; + } + pr_info("%s %s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } + r = leapioraid_wait_for_ioc_to_operational(ioc, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (r) { + if (r == -ETIME) + issue_host_reset = 1; + goto free_mem; + } + smid = leapioraid_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + r = 0; + memset(mpi_reply, 0, sizeof(struct LeapioraidCfgRep_t)); + memset(ioc->config_cmds.reply, 0, sizeof(struct LeapioraidCfgRep_t)); + ioc->config_cmds.status = LEAPIORAID_CMD_PENDING; + config_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(struct LeapioraidCfgReq_t)); + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + 
leapioraid_config_display_some_debug(ioc, smid, "config_request", NULL); + init_completion(&ioc->config_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->config_cmds.done, timeout * HZ); + if (!(ioc->config_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request no reply", + NULL); + leapioraid_check_cmd_timeout(ioc, ioc->config_cmds.status, + mpi_request, + sizeof(struct LeapioraidCfgReq_t) / 4, + issue_reset); + pr_info("%s issue_reset=%d\n", __func__, issue_reset); + retry_count++; + if (ioc->config_cmds.smid == smid) + leapioraid_base_free_smid(ioc, smid); + if (ioc->config_cmds.status & LEAPIORAID_CMD_RESET) + goto retry_config; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } else + issue_host_reset = 1; + goto free_mem; + } + if (ioc->config_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(struct LeapioraidCfgRep_t)); + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + 
leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + } + if (retry_count) + pr_info("%s %s: retry (%d) completed!!\n", + ioc->name, __func__, retry_count); + if ((ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *) mem.page; + + if (p) { + if ((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + if (! + (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + if (! 
+ (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } +free_mem: + if (config_page) + leapioraid_config_free_config_dma_memory(ioc, &mem); +out: + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + if (issue_host_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + r = -EFAULT; + } else { + if (leapioraid_base_check_for_fault_and_issue_reset + (ioc)) + return -EFAULT; + r = -EAGAIN; + } + } + return r; +} + +int +leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + 
LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, 
config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int 
+leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = 0x01; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x02; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct 
LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t *config_page) +{ + 
struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct 
LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = 
LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = 0x09; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + u16 ioc_status; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(struct LeapioraidSasIOUnitP0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == 
LEAPIORAID_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } +out: + return r; +} + +int +leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP0_t *config_page, + u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); +out: + return r; +} + +int +leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x09; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = 
LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sz);
out:
	return r;
}

/**
 * leapioraid_config_set_sas_iounit_pg1 - write SAS IO Unit page 1
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: page contents to write
 * @sz: size of @config_page in bytes
 *
 * Fetches the page header first, then issues WRITE_CURRENT followed by
 * WRITE_NVRAM so the setting also persists across resets.
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidSasIOUnitP1_t *config_page,
	u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x09;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	/*
	 * NOTE(review): the WRITE_CURRENT return value is discarded and the
	 * NVRAM write is attempted regardless; only the WRITE_NVRAM status is
	 * reported to the caller.  Presumably intentional (mirrors the
	 * behavior of similar MPT-style drivers) -- confirm.
	 */
	leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sz);
out:
	return r;
}

/**
 * leapioraid_config_get_expander_pg0 - read SAS Expander page 0
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @form: GET_NEXT_HANDLE or HANDLE form bits of the page address
 * @handle: expander device handle
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidExpanderP0_t *config_page,
	u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x06;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_expander_pg1 - read SAS Expander page 1
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @phy_number: expander phy number to address
 * @handle: expander device handle
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidExpanderP1_t *config_page,
	u32 phy_number, u16 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x02;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	/* page address encodes both the expander handle and the phy number */
	mpi_request.PageAddress =
		cpu_to_le32(LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
			(phy_number << LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
			handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_enclosure_pg0 - read SAS Enclosure page 0
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @form: GET_NEXT_HANDLE or HANDLE form bits of the page address
 * @handle: enclosure handle
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidSasEncP0_t *config_page,
	u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x04;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_phy_pg0 - read SAS PHY page 0
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @phy_number: host phy number
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidSasPhyP0_t *config_page,
	u32 phy_number)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x03;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
		cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_phy_pg1 - read SAS PHY page 1
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @phy_number: host phy number
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidSasPhyP1_t *config_page,
	u32 phy_number)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x01;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
		cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_raid_volume_pg1 - read RAID Volume page 1
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @form: GET_NEXT_HANDLE or HANDLE form bits of the page address
 * @handle: volume device handle
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidRaidVolP1_t *config_page,
	u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x03;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_number_pds - number of physical disks in a volume
 * @ioc: per-adapter object
 * @handle: volume device handle
 * @num_pds: output, NumPhysDisks from RAID Volume page 0 (0 on failure)
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc,
	u16 handle, u8 *num_pds)
{
	struct LeapioraidCfgReq_t mpi_request;
	struct LeapioraidRaidVolP0_t config_page;
	struct LeapioraidCfgRep_t mpi_reply;
	int r;
	u16 ioc_status;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	*num_pds = 0;
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x0A;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
		cpu_to_le32(LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
		sizeof(struct LeapioraidRaidVolP0_t));
	if (!r) {
		/* only trust NumPhysDisks when firmware reports success */
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS)
			*num_pds = config_page.NumPhysDisks;
	}
out:
	return r;
}

/**
 * leapioraid_config_get_raid_volume_pg0 - read RAID Volume page 0
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @form: GET_NEXT_HANDLE or HANDLE form bits of the page address
 * @handle: volume device handle
 * @sz: size of @config_page in bytes (page has a variable-length tail)
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidRaidVolP0_t *config_page,
	u32 form, u32 handle, u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x0A;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sz);
out:
	return r;
}

/**
 * leapioraid_config_get_phys_disk_pg0 - read RAID Physical Disk page 0
 * @ioc: per-adapter object
 * @mpi_reply: reply message frame from firmware
 * @config_page: destination for the page contents
 * @form: form bits of the page address
 * @form_specific: form-specific value (e.g. physical disk number or handle)
 *
 * Return: 0 for success, non-zero for failure.
 */
int
leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidCfgRep_t *mpi_reply,
	struct LeapioraidRaidPDP0_t *config_page,
	u32 form, u32 form_specific)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x05;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | form_specific);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
		sizeof(*config_page));
out:
	return r;
}

/**
 * leapioraid_config_get_volume_handle - map a phys-disk handle to its volume
 * @ioc: per-adapter object
 * @pd_handle: physical disk device handle
 * @volume_handle: output, owning volume handle (0 for a hot spare)
 *
 * Walks all RAID Config page 0 instances (GET_NEXT_CONFIGNUM, starting from
 * 0xff) looking for an element that references @pd_handle.
 *
 * Return: 0 for success (including hot-spare case), non-zero for failure.
 */
int
leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc,
	u16 pd_handle, u16 *volume_handle)
{
	struct LeapioraidRaidCfgP0_t *config_page = NULL;
	struct LeapioraidCfgReq_t mpi_request;
	struct LeapioraidCfgRep_t mpi_reply;
	int r, i, config_page_sz;
	u16 ioc_status;
	int config_num;
	u16 element_type;
	u16 phys_disk_dev_handle;

	*volume_handle = 0;
	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG;
	mpi_request.Header.PageVersion = 0x00;
	mpi_request.Header.PageNumber = 0;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
		LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	/* ExtPageLength is in 4-byte units */
	config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
	config_page = kmalloc(config_page_sz, GFP_KERNEL);
	if (!config_page) {
		r = -1;
		goto out;
	}
	/* 0xff + GET_NEXT_CONFIGNUM yields the first configuration */
	config_num = 0xff;
	while (1) {
		mpi_request.PageAddress = cpu_to_le32(config_num +
			LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
		r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
			LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT,
			config_page, config_page_sz);
		if (r)
			goto out;
		r = -1;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
			goto out;
		for (i = 0; i < config_page->NumElements; i++) {
			element_type =
				le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
				LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
			if (element_type ==
				LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT
				|| element_type ==
				LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
				phys_disk_dev_handle =
					le16_to_cpu(config_page->ConfigElement[i].PhysDiskDevHandle);
				if (phys_disk_dev_handle == pd_handle) {
					*volume_handle =
						le16_to_cpu
						(config_page->ConfigElement[i].VolDevHandle);
					r = 0;
					goto out;
				}
			} else if (element_type ==
				LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
				/* hot spares belong to no volume */
				*volume_handle = 0;
				r = 0;
				goto out;
			}
		}
		config_num = config_page->ConfigNum;
	}
out:
	kfree(config_page);
	return r;
}

/**
 * leapioraid_config_get_volume_wwid - fetch the WWID of a RAID volume
 * @ioc: per-adapter object
 * @volume_handle: volume device handle
 * @wwid: output, volume WWID (0 on failure)
 *
 * Return: 0 for success, -1 for failure.
 */
int
leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc,
	u16 volume_handle, u64 *wwid)
{
	struct LeapioraidCfgRep_t mpi_reply;
	struct LeapioraidRaidVolP1_t raid_vol_pg1;

	*wwid = 0;
	if (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
			&raid_vol_pg1,
			LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
			volume_handle))) {
		*wwid = le64_to_cpu(raid_vol_pg1.WWID);
		return 0;
	} else
		return -1;
}
diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h
new file mode 100644
index 000000000000..9cf8206ccb3c
--- /dev/null
+++ b/drivers/scsi/leapioraid/leapioraid_func.h
@@ -0,0 +1,1258 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * Copyright (C) 2013-2021 LSI Corporation
 * Copyright (C) 2013-2021 Avago Technologies
 * Copyright (C) 2013-2021 Broadcom Inc.
 * (mailto:MPT-FusionLinux.pdl@broadcom.com)
 *
 * Copyright (C) 2024 LeapIO Tech Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 */

#ifndef LEAPIORAID_FUNC_H_INCLUDED
#define LEAPIORAID_FUNC_H_INCLUDED

#include "leapioraid.h"
/*
 * NOTE(review): the angle-bracketed header names of the following includes
 * were lost when this patch text was extracted (likely <linux/...> and
 * <scsi/...> kernel headers) -- restore them from the original driver
 * source before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* older kernels may not provide the fallthrough pseudo-keyword */
#ifndef fallthrough
#define fallthrough
#endif

#define SYS_LOG_BUF_SIZE (0x20000)
#define MAX_UPD_PAYLOAD_SZ (0x4000)

/* driver identity */
#define LEAPIORAID_DRIVER_NAME "LeapIoRaid"
#define LEAPIORAID_AUTHOR "LeapIO Inc."
#define LEAPIORAID_DESCRIPTION "LEAPIO RAID Driver"
#define LEAPIORAID_DRIVER_VERSION "1.00.00.00"
#define LEAPIORAID_MAJOR_VERSION (1)
#define LEAPIORAID_MINOR_VERSION (00)
#define LEAPIORAID_BUILD_VERSION (00)
#define LEAPIORAID_RELEASE_VERSION (00)

/* PCI IDs matched by the probe routine */
#define LEAPIORAID_VENDOR_ID (0xD405)
#define LEAPIORAID_DEVICE_ID_1 (0x1000)
#define LEAPIORAID_DEVICE_ID_2 (0x1001)

#define LEAPIORAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE

#define LEAPIORAID_MIN_PHYS_SEGMENTS (16)
#define LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS (32)

#define LEAPIORAID_MAX_SG_SEGMENTS SG_MAX_SEGMENTS
#define LEAPIORAID_MAX_PHYS_SEGMENTS_STRING "SG_CHUNK_SIZE"

#define LEAPIORAID_SG_DEPTH LEAPIORAID_MAX_PHYS_SEGMENTS

/* timeout (seconds) for config page requests */
#define LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT 15
#define LEAPIORAID_CONFIG_COMMON_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
	LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
	| LEAPIORAID_SGE_FLAGS_END_OF_LIST) << LEAPIORAID_SGE_FLAGS_SHIFT)
#define LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
	LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
	| LEAPIORAID_SGE_FLAGS_END_OF_LIST | LEAPIORAID_SGE_FLAGS_HOST_TO_IOC) \
	<< LEAPIORAID_SGE_FLAGS_SHIFT)

/* default per-device queue depths */
#define LEAPIORAID_SATA_QUEUE_DEPTH (32)
#define LEAPIORAID_SAS_QUEUE_DEPTH (64)
#define LEAPIORAID_RAID_QUEUE_DEPTH (64)
#define LEAPIORAID_KDUMP_SCSI_IO_DEPTH (64)
#define LEAPIORAID_RAID_MAX_SECTORS (128)

#define LEAPIORAID_NAME_LENGTH (32)
#define LEAPIORAID_DRIVER_NAME_LENGTH (24)
#define LEAPIORAID_STRING_LENGTH (64)

#define LEAPIORAID_FRAME_START_OFFSET (256)
#define LEAPIORAID_REPLY_FREE_POOL_SIZE (512)
#define LEAPIORAID_MAX_CALLBACKS (32)
#define LEAPIORAID_MAX_HBA_NUM_PHYS (16)

#define LEAPIORAID_INTERNAL_CMDS_COUNT (10)
#define LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT (3)
#define LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL (1)
#define LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY (2)

#define LEAPIORAID_INVALID_DEVICE_HANDLE (0xFFFF)
#define LEAPIORAID_MAX_CHAIN_ELEMT_SZ (16)
#define LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS (8)
#define LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY (30)
#define LEAPIORAID_READL_RETRY_COUNT_OF_THREE (3)

/* phases passed to reset callback handlers */
#define LEAPIORAID_IOC_PRE_RESET_PHASE (1)
#define LEAPIORAID_IOC_AFTER_RESET_PHASE (2)
#define LEAPIORAID_IOC_DONE_RESET_PHASE (3)

/* LEAPIORAID_TARGET.flags */
#define LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT (0x01)
#define LEAPIORAID_TARGET_FLAGS_VOLUME (0x02)
#define LEAPIORAID_TARGET_FASTPATH_IO (0x08)

#define LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH (8)
#define LEAPIORAID_HIGH_IOPS_REPLY_QUEUES (8)
#define LEAPIORAID_HIGH_IOPS_BATCH_COUNT (16)
#define LEAPIORAID_GEN35_MAX_MSIX_QUEUES (128)
#define LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK (16)

#define LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED (0x2810)

/* compatibility fallbacks for symbols older kernels may lack */
#ifndef DID_TRANSPORT_DISRUPTED
#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY
#endif
#ifndef ULLONG_MAX
#define ULLONG_MAX (~0ULL)
#endif
#ifndef USHORT_MAX
#define USHORT_MAX ((u16)(~0U))
#endif
#ifndef UINT_MAX
#define UINT_MAX (~0U)
#endif

/* return the driver-private area of a Scsi_Host */
static inline void *leapioraid_shost_private(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}

/* Manufacturing page 10 (OEM identification/flags) */
struct LeapioraidManuP10_t {
	struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
	U8 OEMIdentifier;
	U8 Reserved1;
	U16 Reserved2;
	U32 Reserved3;
	U32 GenericFlags0;
	U32 GenericFlags1;
	U32 Reserved4;
	U32 OEMSpecificFlags0;
	U32 OEMSpecificFlags1;
	U32 Reserved5[18];
};

/* Manufacturing page 11 (EEDP, trace buffer, timeouts) */
struct LeapioraidManuP11_t {
	struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
	__le32 Reserved1;
	u8 Reserved2;
	u8 EEDPTagMode;
	u8 Reserved3;
	u8 Reserved4;
	__le32 Reserved5[8];
	u16 AddlFlags2;
	u8 AddlFlags3;
	u8 Reserved6;
	__le32 Reserved7[7];
	u8 AbortTO;
	u8 NumPerDevEvents;
	u8 HostTraceBufferDecrementSizeKB;
	u8 HostTraceBufferFlags;
	u16 HostTraceBufferMaxSizeKB;
	u16 HostTraceBufferMinSizeKB;
	u8 CoreDumpTOSec;
	u8 TimeSyncInterval;
	u16 Reserved9;
	__le32 Reserved10;
};

/* per-scsi_target private data */
struct LEAPIORAID_TARGET {
	struct scsi_target *starget;
	u64 sas_address;
	struct leapioraid_raid_device *raid_device;
	u16 handle;
	int num_luns;
	u32 flags;
	u8 deleted;
	u8 tm_busy;
	struct leapioraid_hba_port *port;
	struct leapioraid_sas_device *sas_dev;
};

/* LEAPIORAID_DEVICE.flags */
#define LEAPIORAID_DEVICE_FLAGS_INIT (0x01)
#define LEAPIORAID_DEVICE_TLR_ON (0x02)

/* per-scsi_device private data */
struct LEAPIORAID_DEVICE {
	struct LEAPIORAID_TARGET *sas_target;
	unsigned int lun;
	u32 flags;
	u8 configured_lun;
	u8 block;
	u8 deleted;
	u8 tlr_snoop_check;
	u8 ignore_delay_remove;
	u8 ncq_prio_enable;
	unsigned long ata_command_pending;
};

#define LEAPIORAID_CMND_PENDING_BIT (0)
/* internal command status bits */
#define LEAPIORAID_CMD_NOT_USED (0x8000)
#define LEAPIORAID_CMD_COMPLETE (0x0001)
#define LEAPIORAID_CMD_PENDING (0x0002)
#define LEAPIORAID_CMD_REPLY_VALID (0x0004)
#define LEAPIORAID_CMD_RESET (0x0008)
#define LEAPIORAID_CMD_COMPLETE_ASYNC (0x0010)

/* synchronous internal (driver-originated) command context */
struct leapioraid_internal_cmd {
	struct mutex mutex;
	struct completion done;
	void *reply;
	void *sense;
	u16 status;
	u16 smid;
};

/* parameters/results of an internally generated SCSI IO */
struct leapioraid_scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/* queued internal command (list-based variant) */
struct leapioraid_internal_qcmd {
	struct list_head list;
	void *request;
	void *reply;
	void *sense;
	u16 status;
	u16 smid;
	struct leapioraid_scsi_io_transfer *transfer_packet;
};

#define LEAPIORAID_WIDE_PORT_API (1)
#define LEAPIORAID_WIDE_PORT_API_PLUS (1)

/* refcounted representation of an attached SAS/SATA end device */
struct leapioraid_sas_device {
	struct list_head list;
	struct scsi_target *starget;
	u64 sas_address;
	u64 device_name;
	u16 handle;
	u64 sas_address_parent;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u16 volume_handle;
	u64 volume_wwid;
	u32 device_info;
	int id;
	int channel;
	u16 slot;
	u8 phy;
	u8 responding;
	u8 fast_path;
	u8 pfa_led_on;
	struct kref refcount;
	u8 *serial_number;
	u8 pend_sas_rphy_add;
	u8 enclosure_level;
	u8 chassis_slot;
	u8 is_chassis_slot_valid;
	u8 connector_name[5];
	u8 ssd_device;
	u8 supports_sata_smart;
	u8 port_type;
	struct leapioraid_hba_port *port;
	struct sas_rphy *rphy;
};

/* take a reference on a sas_device */
static inline
void leapioraid_sas_device_get(struct leapioraid_sas_device *s)
{
	kref_get(&s->refcount);
}

/* kref release callback: frees the containing sas_device */
static inline
void leapioraid_sas_device_free(struct kref *r)
{
	kfree(container_of(r, struct leapioraid_sas_device, refcount));
}

/* drop a reference on a sas_device, freeing it at zero */
static inline
void leapioraid_sas_device_put(struct leapioraid_sas_device *s)
{
	kref_put(&s->refcount, leapioraid_sas_device_free);
}

/* firmware RAID volume as exposed to the SCSI midlayer */
struct leapioraid_raid_device {
	struct list_head list;
	struct scsi_target *starget;
	struct scsi_device *sdev;
	u64 wwid;
	u16 handle;
	u16 block_sz;
	int id;
	int channel;
	u8 volume_type;
	u8 num_pds;
	u8 responding;
	u8 percent_complete;
	u8 direct_io_enabled;
	u8 stripe_exponent;
	u8 block_exponent;
	u64 max_lba;
	u32 stripe_sz;
	u32 device_info;
	u16 pd_handle[8];
};

struct leapioraid_boot_device {
	int channel;
	void *device;
};

/* SAS transport-layer port (possibly wide) */
struct leapioraid_sas_port {
	struct list_head port_list;
	u8 num_phys;
	struct leapioraid_hba_port *hba_port;
	struct sas_identify remote_identify;
	struct sas_rphy *rphy;
#if defined(LEAPIORAID_WIDE_PORT_API)
	struct sas_port *port;
#endif
	struct list_head phy_list;
};

struct leapioraid_sas_phy {
	struct list_head port_siblings;
	struct sas_identify identify;
	struct sas_identify remote_identify;
	struct sas_phy *phy;
	u8 phy_id;
	u16 handle;
	u16 attached_handle;
	u8 phy_belongs_to_port;
	u8 hba_vphy;
	struct leapioraid_hba_port *port;
};

/* expander or HBA node in the discovered SAS topology */
struct leapioraid_raid_sas_node {
	struct list_head list;
	struct device *parent_dev;
	u8 num_phys;
	u64 sas_address;
	u16 handle;
	u64 sas_address_parent;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u8 responding;
	u8 nr_phys_allocated;
	struct leapioraid_hba_port *port;
	struct leapioraid_sas_phy *phy;
	struct list_head sas_port_list;
	struct sas_rphy *rphy;
};

struct leapioraid_enclosure_node {
	struct list_head list;
	struct LeapioraidSasEncP0_t pg0;
};

enum reset_type {
	FORCE_BIG_HAMMER,
	SOFT_RESET,
};

struct leapioraid_chain_tracker {
	void *chain_buffer;
	dma_addr_t chain_buffer_dma;
};

struct leapioraid_chain_lookup {
	struct leapioraid_chain_tracker *chains_per_smid;
	atomic_t chain_offset;
};

/* tracks an outstanding SCSI IO by smid */
struct leapioraid_scsiio_tracker {
	u16 smid;
	struct scsi_cmnd *scmd;
	u8 cb_idx;
	u8 direct_io;
	struct list_head chain_list;
	u16 msix_io;
};

struct leapioraid_request_tracker {
	u16 smid;
	u8 cb_idx;
	struct list_head tracker_list;
};

/* delayed target-reset bookkeeping */
struct leapioraid_tr_list {
	struct list_head list;
	u16 handle;
	u16 state;
};

struct leapioraid_sc_list {
	struct list_head list;
	u16 handle;
};

struct leapioraid_event_ack_list {
	struct list_head list;
	U16 Event;
	U32 EventContext;
};

/* per-MSI-X reply queue state */
struct leapioraid_adapter_reply_queue {
	struct LEAPIORAID_ADAPTER *ioc;
	u8 msix_index;
	u32 reply_post_host_index;
	union LeapioraidRepDescUnion_t *reply_post_free;
	char name[LEAPIORAID_NAME_LENGTH];
	atomic_t busy;
	cpumask_var_t affinity_hint;
	u32 os_irq;
	struct irq_poll irqpoll;
	bool irq_poll_scheduled;
	bool irq_line_enable;
	bool is_blk_mq_poll_q;
	struct list_head list;
};

struct leapioraid_blk_mq_poll_queue {
	atomic_t busy;
	atomic_t pause;
	struct leapioraid_adapter_reply_queue *reply_q;
};

union leapioraid_version_union {
	struct LEAPIORAID_VERSION_STRUCT Struct;
	u32 Word;
};

/* function-pointer types for SGE building and request posting */
typedef void (*LEAPIORAID_ADD_SGE)(void *paddr, u32 flags_length,
	dma_addr_t dma_addr);
typedef int (*LEAPIORAID_BUILD_SG_SCMD)(struct LEAPIORAID_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid);
typedef void (*LEAPIORAID_BUILD_SG)(struct LEAPIORAID_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz,
	dma_addr_t data_in_dma, size_t data_in_sz);
typedef void (*LEAPIORAID_BUILD_ZERO_LEN_SGE)(struct LEAPIORAID_ADAPTER *ioc,
	void *paddr);
typedef void (*PUT_SMID_IO_FP_HIP_TA)(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u16 funcdep);
typedef void (*PUT_SMID_DEFAULT)(struct LEAPIORAID_ADAPTER *ioc, u16 smid);
typedef u32(*BASE_READ_REG) (const void __iomem *addr,
	u8 retry_count);
typedef u8(*GET_MSIX_INDEX) (struct LEAPIORAID_ADAPTER *ioc,
	struct scsi_cmnd *scmd);

/* cached copy of the firmware IOC Facts reply */
struct leapioraid_facts {
	u16 MsgVersion;
	u16 HeaderVersion;
	u8 IOCNumber;
	u8 VP_ID;
	u8 VF_ID;
	u16 IOCExceptions;
	u16 IOCStatus;
	u32 IOCLogInfo;
	u8 MaxChainDepth;
	u8 WhoInit;
	u8 NumberOfPorts;
	u8 MaxMSIxVectors;
	u16 RequestCredit;
	u16 ProductID;
	u32 IOCCapabilities;
	union leapioraid_version_union FWVersion;
	u16 IOCRequestFrameSize;
	u16 IOCMaxChainSegmentSize;
	u16 MaxInitiators;
	u16 MaxTargets;
	u16 MaxSasExpanders;
	u16 MaxEnclosures;
	u16 ProtocolFlags;
	u16 HighPriorityCredit;
	u16 MaxReplyDescriptorPostQueueDepth;
	u8 ReplyFrameSize;
	u8 MaxVolumes;
	u16 MaxDevHandle;
	u16 MaxPersistentEntries;
	u16 MinDevHandle;
	u8 CurrentHostPageSize;
};

struct leapioraid_port_facts {
	u8 PortNumber;
	u8 VP_ID;
	u8 VF_ID;
	u8 PortType;
	u16 MaxPostedCmdBuffers;
};

struct leapioraid_reply_post_struct {
	union LeapioraidRepDescUnion_t *reply_post_free;
	dma_addr_t reply_post_free_dma;
};

struct leapioraid_virtual_phy {
	struct list_head list;
	u64 sas_address;
	u32 phy_mask;
	u8 flags;
};

#define LEAPIORAID_VPHY_FLAG_DIRTY_PHY (0x01)
/* host-side wide/narrow port grouping of HBA phys */
struct leapioraid_hba_port {
	struct list_head list;
	u64 sas_address;
	u32 phy_mask;
	u8 port_id;
	u8 flags;
	u32 vphys_mask;
	struct list_head vphys_list;
};

#define LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT (0x01)
#define LEAPIORAID_HBA_PORT_FLAG_NEW_PORT (0x02)
#define
LEAPIORAID_MULTIPATH_DISABLED_PORT_ID (0xFF)

typedef void (*LEAPIORAID_FLUSH_RUNNING_CMDS)(struct LEAPIORAID_ADAPTER *
	ioc);

/*
 * struct LEAPIORAID_ADAPTER - per-controller (IOC) state.
 *
 * One instance exists per probed PCI function; it aggregates the host
 * registers mapping, work queues, event handling, reset state, callback
 * indexes, message-frame/DMA pools, discovered-device lists and the
 * debug/logging controls used throughout the driver.
 */
struct LEAPIORAID_ADAPTER {
	/* identity and PCI/MMIO resources */
	struct list_head list;
	struct Scsi_Host *shost;
	u8 id;
	u8 IOCNumber;
	int cpu_count;
	char name[LEAPIORAID_NAME_LENGTH];
	char driver_name[LEAPIORAID_DRIVER_NAME_LENGTH];
	char tmp_string[LEAPIORAID_STRING_LENGTH];
	struct pci_dev *pdev;
	struct LeapioraidSysInterfaceRegs_t __iomem *chip;
	phys_addr_t chip_phys;
	int logging_level;
	int fwfault_debug;
	u8 ir_firmware;
	int bars;
	u8 mask_interrupts;
	struct mutex pci_access_mutex;
	/* background work: fault polling, hot unplug, SMART poll, events */
	char fault_reset_work_q_name[48];
	char hba_hot_unplug_work_q_name[48];
	struct workqueue_struct *fault_reset_work_q;
	struct workqueue_struct *hba_hot_unplug_work_q;
	struct delayed_work fault_reset_work;
	struct delayed_work hba_hot_unplug_work;
	struct workqueue_struct *smart_poll_work_q;
	struct delayed_work smart_poll_work;
	u8 adapter_over_temp;
	char firmware_event_name[48];
	struct workqueue_struct *firmware_event_thread;
	spinlock_t fw_event_lock;
	struct list_head fw_event_list;
	struct leapioraid_fw_event_work *current_event;
	u8 fw_events_cleanup;
	int aen_event_read_flag;
	u8 broadcast_aen_busy;
	u16 broadcast_aen_pending;
	/* reset / recovery state */
	u8 shost_recovery;
	u8 got_task_abort_from_ioctl;
	u8 got_task_abort_from_sysfs;
	struct mutex reset_in_progress_mutex;
	struct mutex hostdiag_unlock_mutex;
	spinlock_t ioc_reset_in_progress_lock;
	spinlock_t hba_hot_unplug_lock;
	u8 ioc_link_reset_in_progress;
	int ioc_reset_status;
	u8 ignore_loginfos;
	u8 remove_host;
	u8 pci_error_recovery;
	u8 wait_for_discovery_to_complete;
	u8 is_driver_loading;
	u8 port_enable_failed;
	u8 start_scan;
	u16 start_scan_failed;
	/* MSI-X / interrupt routing */
	u8 msix_enable;
	u8 *cpu_msix_table;
	resource_size_t **reply_post_host_index;
	u16 cpu_msix_table_sz;
	u32 ioc_reset_count;
	LEAPIORAID_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
	u32 non_operational_loop;
	u8 ioc_coredump_loop;
	u32 timestamp_update_count;
	u32 time_sync_interval;
	u8 multipath_on_hba;
	atomic64_t total_io_cnt;
	atomic64_t high_iops_outstanding;
	bool msix_load_balance;
	u16 thresh_hold;
	u8 high_iops_queues;
	u8 iopoll_q_start_index;
	u32 drv_internal_flags;
	u32 drv_support_bitmap;
	u32 dma_mask;
	bool enable_sdev_max_qd;
	bool use_32bit_dma;
	struct leapioraid_blk_mq_poll_queue *blk_mq_poll_queues;
	/* callback indexes registered with the base layer */
	u8 scsi_io_cb_idx;
	u8 tm_cb_idx;
	u8 transport_cb_idx;
	u8 scsih_cb_idx;
	u8 ctl_cb_idx;
	u8 ctl_tm_cb_idx;
	u8 base_cb_idx;
	u8 port_enable_cb_idx;
	u8 config_cb_idx;
	u8 tm_tr_cb_idx;
	u8 tm_tr_volume_cb_idx;
	u8 tm_tr_internal_cb_idx;
	u8 tm_sas_control_cb_idx;
	/* serialized internal command channels */
	struct leapioraid_internal_cmd base_cmds;
	struct leapioraid_internal_cmd port_enable_cmds;
	struct leapioraid_internal_cmd transport_cmds;
	struct leapioraid_internal_cmd scsih_cmds;
	struct leapioraid_internal_cmd tm_cmds;
	struct leapioraid_internal_cmd ctl_cmds;
	struct leapioraid_internal_cmd config_cmds;
	struct list_head scsih_q_intenal_cmds;
	spinlock_t scsih_q_internal_lock;
	/* SGE construction hooks (selected at init time) */
	LEAPIORAID_ADD_SGE base_add_sg_single;
	LEAPIORAID_BUILD_SG_SCMD build_sg_scmd;
	LEAPIORAID_BUILD_SG build_sg;
	LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge;
	u16 sge_size_ieee;
	LEAPIORAID_BUILD_SG build_sg_mpi;
	LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
	/* event notification masks and log */
	u32 event_type[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
	u32 event_context;
	void *event_log;
	u32 event_masks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
	u8 disable_eedp_support;
	u8 tm_custom_handling;
	u16 max_shutdown_latency;
	u16 max_wideport_qd;
	u16 max_narrowport_qd;
	u8 max_sata_qd;
	/* cached facts and config pages */
	struct leapioraid_facts facts;
	struct leapioraid_facts prev_fw_facts;
	struct leapioraid_port_facts *pfacts;
	struct LeapioraidManP0_t manu_pg0;
	struct LeapioraidManuP10_t manu_pg10;
	struct LeapioraidManuP11_t manu_pg11;
	struct LeapioraidBiosP2_t bios_pg2;
	struct LeapioraidBiosP3_t bios_pg3;
	struct LeapioraidIOCP8_t ioc_pg8;
	struct LeapioraidIOUnitP0_t iounit_pg0;
	struct LeapioraidIOUnitP1_t iounit_pg1;
	struct LeapioraidIOUnitP8_t iounit_pg8;
	struct LeapioraidIOCP1_t ioc_pg1_copy;
	struct leapioraid_boot_device req_boot_device;
	struct leapioraid_boot_device req_alt_boot_device;
	struct leapioraid_boot_device current_boot_device;
	/* discovered topology and device lists */
	struct leapioraid_raid_sas_node sas_hba;
	struct list_head sas_expander_list;
	struct list_head enclosure_list;
	spinlock_t sas_node_lock;
	struct list_head sas_device_list;
	struct list_head sas_device_init_list;
	spinlock_t sas_device_lock;
	struct list_head pcie_device_list;
	struct list_head pcie_device_init_list;
	spinlock_t pcie_device_lock;
	struct list_head raid_device_list;
	spinlock_t raid_device_lock;
	u8 io_missing_delay;
	u16 device_missing_delay;
	int sas_id;
	int pcie_target_id;
	void *blocking_handles;
	void *pd_handles;
	u16 pd_handles_sz;
	void *pend_os_device_add;
	u16 pend_os_device_add_sz;
	/* config page DMA buffer */
	u16 config_page_sz;
	void *config_page;
	dma_addr_t config_page_dma;
	void *config_vaddr;
	/* request message frames and queue depths */
	u16 hba_queue_depth;
	u16 sge_size;
	u16 scsiio_depth;
	u16 request_sz;
	u8 *request;
	dma_addr_t request_dma;
	u32 request_dma_sz;
	spinlock_t scsi_lookup_lock;
	int pending_io_count;
	wait_queue_head_t reset_wq;
	int pending_tm_count;
	u32 terminated_tm_count;
	wait_queue_head_t pending_tm_wq;
	u8 out_of_frames;
	wait_queue_head_t no_frames_tm_wq;
	u16 *io_queue_num;
	u32 page_size;
	/* chain buffer pools */
	struct leapioraid_chain_lookup *chain_lookup;
	struct list_head free_chain_list;
	struct dma_pool *chain_dma_pool;
	u16 max_sges_in_main_message;
	u16 max_sges_in_chain_message;
	u16 chains_needed_per_io;
	u16 chain_segment_sz;
	u16 chains_per_prp_buffer;
	/* high-priority and internal request frames */
	u16 hi_priority_smid;
	u8 *hi_priority;
	dma_addr_t hi_priority_dma;
	u16 hi_priority_depth;
	struct leapioraid_request_tracker *hpr_lookup;
	struct list_head hpr_free_list;
	u16 internal_smid;
	u8 *internal;
	dma_addr_t internal_dma;
	u16 internal_depth;
	struct leapioraid_request_tracker *internal_lookup;
	struct list_head internal_free_list;
	/* sense, reply and reply-post DMA pools */
	u8 *sense;
	dma_addr_t sense_dma;
	struct dma_pool *sense_dma_pool;
	u16 reply_sz;
	u8 *reply;
	dma_addr_t reply_dma;
	u32 reply_dma_max_address;
	u32 reply_dma_min_address;
	struct dma_pool *reply_dma_pool;
	u16 reply_free_queue_depth;
	__le32 *reply_free;
	dma_addr_t reply_free_dma;
	struct dma_pool *reply_free_dma_pool;
	u32 reply_free_host_index;
	u16 reply_post_queue_depth;
	struct leapioraid_reply_post_struct *reply_post;
	struct dma_pool *reply_post_free_dma_pool;
	struct dma_pool *reply_post_free_array_dma_pool;
	struct LeapioraidIOCInitRDPQArrayEntry *reply_post_free_array;
	dma_addr_t reply_post_free_array_dma;
	u8 reply_queue_count;
	struct list_head reply_queue_list;
	u8 rdpq_array_capable;
	u8 rdpq_array_enable;
	u8 rdpq_array_enable_assigned;
	u8 combined_reply_queue;
	u8 nc_reply_index_count;
	u8 smp_affinity_enable;
	resource_size_t **replyPostRegisterIndex;
	/* delayed-work lists for deferred task management / acks */
	struct list_head delayed_tr_list;
	struct list_head delayed_tr_volume_list;
	struct list_head delayed_internal_tm_list;
	struct list_head delayed_sc_list;
	struct list_head delayed_event_ack_list;
	u32 ring_buffer_offset;
	u32 ring_buffer_sz;
	u8 reset_from_user;
	u8 hide_ir_msg;
	u8 warpdrive_msg;
	u8 mfg_pg10_hide_flag;
	u8 hide_drives;
	u8 atomic_desc_capable;
	/* register access / request posting hooks */
	BASE_READ_REG base_readl;
	PUT_SMID_IO_FP_HIP_TA put_smid_scsi_io;
	PUT_SMID_IO_FP_HIP_TA put_smid_fast_path;
	PUT_SMID_IO_FP_HIP_TA put_smid_hi_priority;
	PUT_SMID_DEFAULT put_smid_default;
	GET_MSIX_INDEX get_msix_index_for_smlio;
	void *device_remove_in_progress;
	u16 device_remove_in_progress_sz;
	u8 *tm_tr_retry;
	u32 tm_tr_retry_sz;
	u8 temp_sensors_count;
	struct list_head port_table_list;
	/* firmware log / PCIe trace buffer */
	u8 *log_buffer;
	dma_addr_t log_buffer_dma;
	char pcie_log_work_q_name[48];
	struct workqueue_struct *pcie_log_work_q;
	struct delayed_work
pcie_log_work; + u32 open_pcie_trace; +}; + +#define LEAPIORAID_DEBUG (0x00000001) +#define LEAPIORAID_DEBUG_MSG_FRAME (0x00000002) +#define LEAPIORAID_DEBUG_SG (0x00000004) +#define LEAPIORAID_DEBUG_EVENTS (0x00000008) +#define LEAPIORAID_DEBUG_EVENT_WORK_TASK (0x00000010) +#define LEAPIORAID_DEBUG_INIT (0x00000020) +#define LEAPIORAID_DEBUG_EXIT (0x00000040) +#define LEAPIORAID_DEBUG_FAIL (0x00000080) +#define LEAPIORAID_DEBUG_TM (0x00000100) +#define LEAPIORAID_DEBUG_REPLY (0x00000200) +#define LEAPIORAID_DEBUG_HANDSHAKE (0x00000400) +#define LEAPIORAID_DEBUG_CONFIG (0x00000800) +#define LEAPIORAID_DEBUG_DL (0x00001000) +#define LEAPIORAID_DEBUG_RESET (0x00002000) +#define LEAPIORAID_DEBUG_SCSI (0x00004000) +#define LEAPIORAID_DEBUG_IOCTL (0x00008000) +#define LEAPIORAID_DEBUG_CSMISAS (0x00010000) +#define LEAPIORAID_DEBUG_SAS (0x00020000) +#define LEAPIORAID_DEBUG_TRANSPORT (0x00040000) +#define LEAPIORAID_DEBUG_TASK_SET_FULL (0x00080000) + +#define LEAPIORAID_CHECK_LOGGING(IOC, CMD, BITS) \ +{ \ + if (IOC->logging_level & BITS) \ + CMD; \ +} + +#define dprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG) +#define dsgprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SG) +#define devtprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENTS) +#define dewtprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENT_WORK_TASK) +#define dinitprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_INIT) +#define dexitprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EXIT) +#define dfailprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_FAIL) +#define dtmprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TM) +#define dreplyprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_REPLY) +#define dhsprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_HANDSHAKE) 
+#define dcprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CONFIG) +#define ddlprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_DL) +#define drsprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_RESET) +#define dsprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SCSI) +#define dctlprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_IOCTL) +#define dcsmisasprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CSMISAS) +#define dsasprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS) +#define dsastransport(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS_WIDE) +#define dmfprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_MSG_FRAME) +#define dtsfprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TASK_SET_FULL) +#define dtransportprintk(IOC, CMD) \ + LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TRANSPORT) + +static inline void +leapioraid_debug_dump_mf(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *) mpi_request; + + pr_info("mf:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +static inline void +leapioraid_debug_dump_reply(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *) mpi_request; + + pr_info("reply:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +static inline void +leapioraid_debug_dump_config(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *) mpi_request; + + pr_info("config:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +#define LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ (0x00000001) 
+#define LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED (0x00000002) + +typedef u8(*LEAPIORAID_CALLBACK) (struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); + +#define SCSIH_MAP_QUEUE(shost) static void leapioraid_scsihost_map_queues(shost) + +extern struct list_head leapioraid_ioc_list; +extern spinlock_t leapioraid_gioc_lock; +void leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_hard_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void *leapioraid_base_get_msg_frame(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void *leapioraid_base_get_sense_buffer(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le32 leapioraid_base_get_sense_buffer_dma(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le64 leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +void leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll); +u16 leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +u16 leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); +u16 leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +void leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_initialize_callback_handler(void); +u8 
leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func); +void leapioraid_base_release_callback_handler(u8 cb_idx); +u8 leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc, + u32 phys_addr); +u32 leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked); +int leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev); +void leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code); +#define leapioraid_print_fault_code(ioc, fault_code) \ + do { \ + pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + leapioraid_base_fault_info(ioc, fault_code); \ + } while (0) +void leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, + u16 fault_code); +int leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller); +int leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t * + mpi_reply, + struct LeapioraidSasIoUnitControlReq_t * + mpi_request); +int leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request); +void leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc, + u32 *event_type); +void leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault); +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsiio_tracker *st); +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct scsi_cmnd *scmd); +int +leapioraid_base_check_for_fault_and_issue_reset(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_port_enable(struct 
LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_check_cmd_timeout(struct LEAPIORAID_ADAPTER *ioc, + u8 status, void *mpi_request, int sz); +#define leapioraid_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ + do { \ + pr_err("%s In func: %s\n", ioc->name, __func__); \ + issue_reset = leapioraid_base_check_cmd_timeout(ioc, status, mpi_request, sz); \ + } while (0) +int leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count); +void leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_unlock_and_get_host_diagnostic(struct LEAPIORAID_ADAPTER + *ioc, u32 *host_diagnostic); +void leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc); +extern char driver_name[LEAPIORAID_NAME_LENGTH]; +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get(struct LEAPIORAID_ADAPTER + *ioc, u16 smid); +u8 leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc, + u8 msix_index, u32 reply); +void leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +int 
leapioraid_scsihost_issue_tm(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method); +int leapioraid_scsihost_issue_locked_tm(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, uint channel, uint id, + uint lun, u8 type, u16 smid_task, + u8 timeout, u8 tr_method); +void leapioraid_scsihost_set_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_scsihost_clear_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_expander_remove( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_device_remove_by_sas_address(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +u8 leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, u8 port, u8 skip_dirty_flag); +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, struct leapioraid_hba_port *port, u32 phy); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy); +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_flush_running_cmds(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc); +struct 
leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device); +void leapioraid_scsihost_clear_outstanding_scsi_tm_commands( + struct LEAPIORAID_ADAPTER *ioc); +u32 leapioraid_base_mod64(u64 dividend, u32 divisor); +void +leapioraid__scsihost_change_queue_depth(struct scsi_device *sdev, int qdepth); +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev); +u8 leapioraid_config_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +int leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys); +int leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page); +int leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t + *config_page); +int leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page); +int leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t *config_page); +int leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page); +int leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 
form, u32 handle); +int leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP0_t *config_page, + u16 sz); +int leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t *config_page); +int leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page); +int leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP0_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP1_t *config_page, + u32 phy_number, u16 handle); +int leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasEncP0_t * + config_page, u32 form, u32 
handle); +int leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP0_t *config_page, + u32 phy_number); +int leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP1_t *config_page, + u32 phy_number); +int leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP1_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP0_t *config_page, + u32 form, u32 handle, u16 sz); +int leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidPDP0_t * + config_page, u32 form, + u32 form_specific); +int leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc, + u16 pd_handle, u16 *volume_handle); +int leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); +extern const struct attribute_group *leapioraid_host_groups[]; +extern const struct attribute_group *leapioraid_dev_groups[]; +void leapioraid_ctl_init(void); +void leapioraid_ctl_exit(void); +u8 leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_ctl_tm_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +u8 leapioraid_ctl_event_callback(struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, + u32 reply); +void leapioraid_ctl_add_to_event_log(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t * + mpi_reply); +void leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER 
*ioc); +int leapioraid_ctl_release(struct inode *inode, struct file *filep); +void ctl_init(void); +void ctl_exit(void); +u8 leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port); +int leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev); +int leapioraid_transport_add_expander_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev); +void leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, + u8 phy_number, u8 link_rate, + struct leapioraid_hba_port *port); +extern struct sas_function_template leapioraid_transport_functions; +extern struct scsi_transport_template *leapioraid_transport_template; +void +leapioraid_transport_del_phy_from_an_existing_port(struct LEAPIORAID_ADAPTER + *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy); +#if defined(LEAPIORAID_WIDE_PORT_API) +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port); +#endif +#endif diff --git a/drivers/scsi/leapioraid/leapioraid_os.c b/drivers/scsi/leapioraid/leapioraid_os.c new file mode 100644 index 000000000000..368a3c859a04 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_os.c @@ -0,0 +1,9823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Scsi Host Layer 
for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" +#include + +#define RAID_CHANNEL 1 + +static void leapioraid_scsihost_expander_node_remove( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander); +static void leapioraid_firmware_event_work(struct work_struct *work); +static void leapioraid_firmware_event_work_delayed(struct work_struct *work); +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_sn(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 **serial_number); +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_supported_pages(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u32 lun, void *data, + u32 data_length); +static enum device_responsive_state leapioraid_scsihost_ata_pass_thru_idd( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, + u8 *is_ssd_device, + u8 tr_timeout, + u8 tr_method); +static enum device_responsive_state +leapioraid_scsihost_wait_for_target_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + u8 tr_timeout, u8 tr_method); +static enum device_responsive_state +leapioraid_scsihost_wait_for_device_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + int lun, u8 tr_timeout, u8 tr_method); +static void leapioraid_scsihost_remove_device( + struct 
LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_sas_device *sas_device);
static int leapioraid_scsihost_add_device(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static u8 leapioraid_scsihost_check_for_pending_tm(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid);
static void leapioraid_scsihost_send_event_to_turn_on_pfa_led(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle);
static void leapioraid_scsihost_complete_devices_scanning(
	struct LEAPIORAID_ADAPTER *ioc);

/* Global list of all adapters driven by this module; walked under
 * leapioraid_gioc_lock (e.g. when propagating logging_level changes). */
LIST_HEAD(leapioraid_ioc_list);
DEFINE_SPINLOCK(leapioraid_gioc_lock);

MODULE_AUTHOR(LEAPIORAID_AUTHOR);
MODULE_DESCRIPTION(LEAPIORAID_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(LEAPIORAID_DRIVER_VERSION);

/*
 * Callback indexes for completion routines registered with the base
 * driver (leapioraid_base_register_callback_handler returns a u8 index).
 * The -1 initializer intentionally wraps to 0xFF, marking the slot as
 * "not yet registered".
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 ctl_tm_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* Per-adapter instance counter -- presumably incremented at probe time;
 * not visible in this chunk (TODO confirm against the probe path). */
static int leapioraid_ids;
static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
static u8 tm_tr_internal_cb_idx = -1;
static u8 tm_sas_control_cb_idx = -1;
/* Module-wide debug bitmask; mirrored into every ioc->logging_level by
 * the module_param_call() setter further below. */
static u32 logging_level;

MODULE_PARM_DESC(logging_level,
		 " bits for enabling additional logging info (default=0)");

static int open_pcie_trace;
module_param(open_pcie_trace, int, 0444);
MODULE_PARM_DESC(open_pcie_trace, "open_pcie_trace: open=1/default=0(close)");

static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, "disable discovery");

static struct raid_template *leapioraid_raid_template;

/* Result of polling a device during discovery; returned by the
 * wait_for_{target,device}_to_become_ready helpers declared above. */
enum device_responsive_state {
	DEVICE_READY,
	DEVICE_RETRY,
	DEVICE_RETRY_UA,
	DEVICE_START_UNIT,
	DEVICE_STOP_UNIT,
	DEVICE_ERROR,
};

/* Decoded SCSI sense triple: sense key / ASC / ASCQ. */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* Pseudo device handles used to tag special firmware-event work items
 * (real firmware handles do not reach this range) -- NOTE(review):
 * inferred from the names and the matching helpers above; confirm
 * against the event dispatch code later in this file. */
#define LEAPIORAID_TURN_ON_PFA_LED (0xFFFC)
#define LEAPIORAID_PORT_ENABLE_COMPLETE (0xFFFD)
#define LEAPIORAID_REMOVE_UNRESPONDING_DEVICES
(0xFFFF) + +struct leapioraid_fw_event_work { + struct list_head list; + struct work_struct work; + u8 cancel_pending_work; + struct delayed_work delayed_work; + u8 delayed_work_active; + struct LEAPIORAID_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + struct kref refcount; + void *event_data; + u8 *retries; +}; + +static void +leapioraid_fw_event_work_free(struct kref *r) +{ + struct leapioraid_fw_event_work *fw_work; + + fw_work = container_of( + r, struct leapioraid_fw_event_work, refcount); + kfree(fw_work->event_data); + kfree(fw_work->retries); + kfree(fw_work); +} + +static void +leapioraid_fw_event_work_get( + struct leapioraid_fw_event_work *fw_work) +{ + kref_get(&fw_work->refcount); +} + +static void +leapioraid_fw_event_work_put(struct leapioraid_fw_event_work *fw_work) +{ + kref_put(&fw_work->refcount, leapioraid_fw_event_work_free); +} + +static +struct leapioraid_fw_event_work *leapioraid_alloc_fw_event_work(int len) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); + if (!fw_event) + return NULL; + kref_init(&fw_event->refcount); + return fw_event; +} + +static int +leapioraid_scsihost_set_debug_level( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting logging_level(0x%08x)\n", logging_level); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->logging_level = logging_level; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call(logging_level, + leapioraid_scsihost_set_debug_level, param_get_int, + &logging_level, 0644); + +static inline int +leapioraid_scsihost_srch_boot_sas_address(u64 sas_address, + struct LEAPIORAID_BOOT_DEVICE_SAS_WWID *boot_device) +{ + return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 
1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_device_name(u64 device_name, + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME *boot_device) +{ + return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT *boot_device) +{ + return (enclosure_logical_id == + le64_to_cpu(boot_device->EnclosureLogicalID) + && slot_number == le16_to_cpu(boot_device->SlotNumber)) ? 1 : 0; +} + +static void +leapioraid_scsihost_display_enclosure_chassis_info( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev, + struct scsi_target *starget) +{ + if (sdev) { + if (sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } else if (starget) { + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + starget_printk(KERN_INFO, starget, + "chassis slot(0x%04x)\n", sas_device->chassis_slot); + } else { + if (sas_device->enclosure_handle != 0) + pr_info("%s enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, + (unsigned long 
long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + pr_info("%s enclosure level(0x%04x),connector name( %s)\n", + ioc->name, + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + pr_info("%s chassis slot(0x%04x)\n", + ioc->name, sas_device->chassis_slot); + } +} + +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, + u8 port_id, u8 skip_dirty_flag) +{ + struct leapioraid_hba_port *port, *port_next; + + if (!ioc->multipath_on_hba) + port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + if (port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) + continue; + return port; + } + if (skip_dirty_flag) { + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + return port; + } + } + if (unlikely(!ioc->multipath_on_hba)) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_ATOMIC); + if (!port) + return NULL; + + port->port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + pr_err( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + return port; + } + return NULL; +} + +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port, u32 phy) +{ + struct leapioraid_virtual_phy *vphy, *vphy_next; + + if (!port->vphys_mask) + return NULL; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + if (vphy->phy_mask & (1 << phy)) + return vphy; + } + return NULL; +} + +static int +leapioraid_scsihost_is_boot_device(u64 sas_address, u64 device_name, + u64 enclosure_logical_id, u16 slot, u8 form, + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE *boot_device) +{ + 
int rc = 0; + + switch (form) { + case LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID: + if (!sas_address) + break; + rc = leapioraid_scsihost_srch_boot_sas_address(sas_address, + &boot_device->SasWwid); + break; + case LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT: + if (!enclosure_logical_id) + break; + rc = leapioraid_scsihost_srch_boot_encl_slot( + enclosure_logical_id, + slot, + &boot_device->EnclosureSlot); + break; + case LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME: + if (!device_name) + break; + rc = leapioraid_scsihost_srch_boot_device_name(device_name, + &boot_device->DeviceName); + break; + case LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: + break; + } + return rc; +} + +static int +leapioraid_scsihost_get_sas_address( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 ioc_status; + + *sas_address = 0; + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + if ((handle <= ioc->sas_hba.num_phys) && + (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & + LEAPIORAID_SAS_DEVICE_INFO_SEP))) + *sas_address = ioc->sas_hba.sas_address; + else + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; + } + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return -ENXIO; + pr_err("%s handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; +} + +static void +leapioraid_scsihost_determine_boot_device( + struct LEAPIORAID_ADAPTER *ioc, void *device, + u32 channel) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u64 
sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + if (!ioc->is_driver_loading) + return; + if (!ioc->bios_pg3.BiosVersion) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } + if (!ioc->req_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.channel = channel; + } + } + if (!ioc->req_alt_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.channel = channel; + } + } + if (!ioc->current_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.channel = channel; + } + } +} + +static +struct 
leapioraid_sas_device *__leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + ret = tgt_priv->sas_dev; + if (ret) + leapioraid_sas_device_get(ret); + return ret; +} + +static +struct leapioraid_sas_device *leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __leapioraid_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return ret; +} + +static +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + + if (!port) + return NULL; + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + return NULL; 
+found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + if (!port) + return sas_device; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static struct leapioraid_sas_device *__leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->handle == handle) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->handle == handle) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +void +leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + int was_on_sas_device_list = 0; + + if (!sas_device) + return; + pr_info("%s %s: removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if 
(!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + kfree(sas_device->serial_number); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_device_remove_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +void +leapioraid_device_remove_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_sas_device_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), 
sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (ioc->hide_drives) { + clear_bit(sas_device->handle, ioc->pend_os_device_add); + return; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, + sas_device->port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } else + clear_bit(sas_device->handle, ioc->pend_os_device_add); +} + +static void +leapioraid_scsihost_sas_device_init_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_init_list); + leapioraid_scsihost_determine_boot_device(ioc, sas_device, 0); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_id( + struct LEAPIORAID_ADAPTER *ioc, int id, int channel) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, 
&ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } +out: + return r; +} + +struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_wwid( + struct LEAPIORAID_ADAPTER *ioc, u64 wwid) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_raid_device_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + u8 protection_mask; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, + __func__, raid_device->handle, + (unsigned long long)raid_device->wwid)); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_raid_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 
+} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +static +struct leapioraid_enclosure_node *leapioraid_scsihost_enclosure_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_enclosure_node *enclosure_dev, *r; + + r = NULL; + list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + if (!port) + return r; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + sas_expander->port != port) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_expander_node_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &ioc->sas_expander_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static int +leapioraid_scsihost_is_sas_end_device(u32 device_info) +{ + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE && + ((device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + +static u8 
+leapioraid_scsihost_scsi_lookup_find_by_target( + struct LEAPIORAID_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && scmd->device->channel == channel) + return 1; + } + return 0; +} + +static u8 +leapioraid_scsihost_scsi_lookup_find_by_lun( + struct LEAPIORAID_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + struct leapioraid_scsiio_tracker *st; + struct LeapioraidSCSIIOReq_t *mpi_request; + u32 unique_tag = smid - 1; + + if (smid > 0 && smid <= ioc->shost->can_queue) { + unique_tag = + ioc->io_queue_num[smid - + 1] << BLK_MQ_UNIQUE_TAG_BITS | (smid - 1); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (!mpi_request->DevHandle) + return scmd; + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + scmd = NULL; + } + } + return scmd; +} + +static void +leapioraid_scsihost_display_sdev_qd(struct scsi_device *sdev) +{ + if (sdev->inquiry_len <= 7) + return; + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); +} + +static int +leapioraid_scsihost_change_queue_depth( + struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int 
max_depth; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + goto not_sata; + + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) + goto not_sata; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = LEAPIORAID_SATA_QUEUE_DEPTH; + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +not_sata: + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_change_queue_depth(sdev, qdepth); + leapioraid_scsihost_display_sdev_qd(sdev); + return sdev->queue_depth; +} + +void +leapioraid__scsihost_change_queue_depth( + struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + leapioraid_scsihost_change_queue_depth(sdev, qdepth); +} + +static int +leapioraid_scsihost_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = + kzalloc(sizeof(struct LEAPIORAID_TARGET), GFP_KERNEL); + if (!sas_target_priv_data) + 
return -ENOMEM; + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = LEAPIORAID_INVALID_DEVICE_HANDLE; + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, starget->id, starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_VOLUME; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->port = sas_device->port; + sas_target_priv_data->sas_dev = sas_device; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return 0; +} + +static void +leapioraid_scsihost_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + if (starget->channel == RAID_CHANNEL) { + 
spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, starget->id, starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) + && (sas_device->id == starget->id) + && (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + if (sas_device) { + sas_target_priv_data->sas_dev = NULL; + leapioraid_sas_device_put(sas_device); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +static int +leapioraid_scsihost_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct leapioraid_raid_device *raid_device; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + sas_device_priv_data = + kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = LEAPIORAID_DEVICE_FLAGS_INIT; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = 
leapioraid_scsihost_raid_device_find_by_id(ioc, + starget->id, + starget->channel); + if (raid_device) + raid_device->sdev = sdev; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; +} + +static void +leapioraid_scsihost_slave_destroy(struct scsi_device *sdev) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + if (!sdev->hostdata) + return; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_from_target(ioc, + sas_target_priv_data); + if (sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +static void +leapioraid_scsihost_display_sata_capabilities( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 
device_info; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s),\n\t\t" + "smart(%s), fua(%s), sw_preserve(%s)\n", + (device_info & LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : + "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) + ? "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? + "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : + "n"); +} + +static int +leapioraid_scsihost_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == RAID_CHANNEL) ? 
1 : 0; +} + +static void +leapioraid_scsihost_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!handle) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; +out: + raid_set_resync(leapioraid_raid_template, dev, percent_complete); +} + +static void +leapioraid_scsihost_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) + handle = raid_device->handle; + 
spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + switch (vol_pg0.VolumeState) { + case LEAPIORAID_RAID_VOL_STATE_OPTIMAL: + case LEAPIORAID_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case LEAPIORAID_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case LEAPIORAID_RAID_VOL_STATE_FAILED: + case LEAPIORAID_RAID_VOL_STATE_MISSING: + state = RAID_STATE_OFFLINE; + break; + } +out: + raid_set_state(leapioraid_raid_template, dev, state); +} + +static void +leapioraid_scsihost_set_level(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_10; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + raid_set_level(leapioraid_raid_template, &sdev->sdev_gendev, level); +} + +static int +leapioraid_scsihost_get_volume_capabilities( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + struct LeapioraidRaidVolP0_t *vol_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((leapioraid_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + 
__FILE__, __LINE__, __func__)); + return 1; + } + raid_device->num_pds = num_pds; + sz = offsetof(struct LeapioraidRaidVolP0_t, PhysDisk) + (num_pds * + sizeof + (struct LEAPIORAID_RAIDVOL0_PHYS_DISK)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + if ((leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + raid_device->handle, sz))) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(vol_pg0); + return 1; + } + raid_device->volume_type = vol_pg0->VolumeType; + if (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (! + (leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + kfree(vol_pg0); + return 0; +} + +static void +leapioraid_scsihost_enable_tlr( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_device *sdev) +{ + u8 data[30]; + u8 page_len, ii; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + + if (sdev->type != TYPE_TAPE) + return; + if (!(ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR)) + return; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + return; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + return; + if (leapioraid_scsihost_inquiry_vpd_supported_pages(ioc, + sas_target_priv_data->handle, + sdev->lun, data, + sizeof(data)) != + DEVICE_READY) { + sas_device = + leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + 
sas_target_priv_data->port); + if (sas_device) { + sdev_printk(KERN_INFO, sdev, + "%s: DEVICE NOT READY: handle(0x%04x),\n\t\t" + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + __func__, + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + sdev, NULL); + leapioraid_sas_device_put(sas_device); + } + return; + } + page_len = data[3]; + for (ii = 4; ii < page_len + 4; ii++) { + if (data[ii] == 0x90) { + sas_device_priv_data->flags |= LEAPIORAID_DEVICE_TLR_ON; + return; + } + } +} + +static void +leapioraid_scsihost_enable_ssu_on_sata( + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev) +{ + if (!(sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)) + return; + if (sas_device->ssd_device) { + sdev->manage_system_start_stop = 1; + sdev->manage_runtime_start_stop = 1; + } +} + +static int +leapioraid_scsihost_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + u8 *serial_number = NULL; + enum device_responsive_state retval; + u8 count = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device 
= + leapioraid_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (leapioraid_scsihost_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = LEAPIORAID_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = LEAPIORAID_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + 0x00000004) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + if (!ioc->warpdrive_msg) + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + if (shost->max_sectors > LEAPIORAID_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + LEAPIORAID_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + LEAPIORAID_RAID_MAX_SECTORS); + } + leapioraid__scsihost_change_queue_depth(sdev, 
qdepth); + leapioraid_scsihost_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + if (leapioraid_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (volume_handle && leapioraid_config_get_volume_wwid(ioc, + volume_handle, + &volume_wwid)) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + } + leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(serial_number); + return 1; + } + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + sas_device->serial_number = serial_number; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = (sas_device->port_type > 1) ? 
+ ioc->max_wideport_qd : ioc->max_narrowport_qd; + ssp_target = 1; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; + } else { + qdepth = ioc->max_sata_qd; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info( + NULL, sas_device, sdev, NULL); + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!ssp_target) { + leapioraid_scsihost_display_sata_capabilities(ioc, handle, sdev); + do { + retval = leapioraid_scsihost_ata_pass_thru_idd(ioc, handle, + &sas_device->ssd_device, 30, 0); + } while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA) + && count++ < 3); + } + leapioraid_scsihost_enable_ssu_on_sata(sas_device, sdev); + if (serial_number) + sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", + serial_number); + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + if (ssp_target) { + sas_read_port_mode_page(sdev); + leapioraid_scsihost_enable_tlr(ioc, sdev); + } + + return 0; +} + +static int +leapioraid_scsihost_bios_param( + struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + if ((ulong) capacity >= 0x200000) { + heads = 255; + sectors = 63; + 
dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + return 0; +} + +static void +leapioraid_scsihost_response_code( + struct LEAPIORAID_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + pr_warn("%s response_code(0x%01x): %s\n", + ioc->name, response_code, desc); +} + +static u8 +leapioraid_scsihost_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->tm_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->tm_cmds.smid != smid) + return 1; + ioc->tm_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength * 4); + ioc->tm_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->tm_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->tm_cmds.done); + return 1; +} + +void +leapioraid_scsihost_set_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; 
+ u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 1; + skip = 1; + ioc->ignore_loginfos = 1; + } + } +} + +void +leapioraid_scsihost_clear_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 0; + skip = 1; + ioc->ignore_loginfos = 0; + } + } +} + +static int +leapioraid_scsihost_tm_cmd_map_status( + struct LEAPIORAID_ADAPTER *ioc, uint channel, + uint id, uint lun, u8 type, u16 smid_task) +{ + if (smid_task <= ioc->shost->can_queue) { + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (! + (leapioraid_scsihost_scsi_lookup_find_by_target + (ioc, id, channel))) + return SUCCESS; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (! 
+ (leapioraid_scsihost_scsi_lookup_find_by_lun + (ioc, id, lun, channel))) + return SUCCESS; + break; + default: + return SUCCESS; + } + } else if (smid_task == ioc->scsih_cmds.smid) { + if ((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->scsih_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } else if (smid_task == ioc->ctl_cmds.smid) { + if ((ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->ctl_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } + return FAILED; +} + +static int +leapioraid_scsihost_tm_post_processing(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task) +{ + int rc; + + rc = leapioraid_scsihost_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); + if (rc == SUCCESS) + return rc; + pr_err( + "%s Poll finish of smid(%d),task_type(0x%02x),handle(0x%04x)\n", + ioc->name, + smid_task, + type, + handle); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_sync_reply_irqs(ioc, 1); + leapioraid_base_unmask_interrupts(ioc); + return leapioraid_scsihost_tm_cmd_map_status( + ioc, channel, id, lun, type, smid_task); +} + +int +leapioraid_scsihost_issue_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidSCSIIOReq_t *request; + u16 smid = 0; + u32 ioc_state; + struct leapioraid_scsiio_tracker *scsi_lookup = NULL; + int rc; + u16 msix_task = 0; + u8 issue_reset = 0; + + lockdep_assert_held(&ioc->tm_cmds.mutex); + if (ioc->tm_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_info("%s %s: tm_cmd busy!!!\n", + __func__, ioc->name); + return FAILED; + } + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return FAILED; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 
0); + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + pr_info("%s unexpected doorbell active!\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return FAILED; + } + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = leapioraid_get_st_from_smid(ioc, smid_task); + dtmprintk(ioc, pr_info( + "%s sending tm: handle(0x%04x),\n\t\t" + "task_type(0x%02x), timeout(%d) tr_method(0x%x) smid(%d)\n", + ioc->name, + handle, + type, + timeout, + tr_method, + smid_task)); + ioc->tm_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(struct LeapioraidSCSITmgRep_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + mpi_request->MsgFlags = tr_method; + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + leapioraid_scsihost_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); 
+ if ((type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup && (scsi_lookup->msix_io < ioc->reply_queue_count))) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; + ioc->put_smid_hi_priority(ioc, smid, msix_task); + wait_for_completion_timeout(&ioc->tm_cmds.done, timeout * HZ); + if (!(ioc->tm_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof + (struct LeapioraidSCSITmgReq_t) + / 4, issue_reset); + if (issue_reset) { + rc = leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto out; + } + } + leapioraid_base_sync_reply_irqs(ioc, 0); + if (ioc->tm_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->tm_cmds.reply; + dtmprintk(ioc, pr_info( + "%s complete tm: ioc_status(0x%04x),\n\t\t" + "loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (ioc->logging_level & LEAPIORAID_DEBUG_TM) { + leapioraid_scsihost_response_code( + ioc, mpi_reply->ResponseCode); + if (mpi_reply->IOCStatus) + leapioraid_debug_dump_mf( + mpi_request, + sizeof(struct LeapioraidSCSITmgReq_t) / 4); + } + } + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + request = leapioraid_base_get_msg_frame(ioc, smid_task); + if (le16_to_cpu(request->DevHandle) != handle) + break; + pr_err( + "%s Task abort tm failed:\n\t\t" + "handle(0x%04x), timeout(%d),\n\t\t" + "tr_method(0x%x), smid(%d), msix_index(%d)\n", + ioc->name, + handle, + timeout, + tr_method, + smid_task, + msix_task); + rc = FAILED; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + rc = leapioraid_scsihost_tm_post_processing( + ioc, handle, channel, id, lun, type, smid_task); + break; + case 
LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } +out: + leapioraid_scsihost_clear_tm_flag(ioc, handle); + ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED; + return rc; +} + +int +leapioraid_scsihost_issue_locked_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + int ret; + + mutex_lock(&ioc->tm_cmds.mutex); + ret = leapioraid_scsihost_issue_tm( + ioc, handle, channel, id, lun, type, + smid_task, timeout, tr_method); + mutex_unlock(&ioc->tm_cmds.mutex); + return ret; +} + +static void +leapioraid_scsihost_tm_display_info( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + starget_printk( + KERN_INFO, starget, "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, + priv_target->handle, device_str, + (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "%s: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + 
sas_device, + NULL, starget); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +static int +leapioraid_scsihost_abort(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u16 handle; + int r; + struct leapioraid_scsiio_tracker *st + = leapioraid_base_scsi_cmd_priv(scmd); + u8 timeout = 30; + + sdev_printk( + KERN_INFO, scmd->device, + "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + if (st && st->smid) + leapioraid_base_free_smid(ioc, st->smid); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, + "No ref at driver, assuming scmd(0x%p) might have completed\n", + scmd); + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + leapioraid_halt_firmware(ioc, 0); + handle = sas_device_priv_data->sas_target->handle; + r = leapioraid_scsihost_issue_locked_tm( + ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, timeout, 0); +out: + sdev_printk( + KERN_INFO, scmd->device, + "task abort: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + return r; +} + +static int +leapioraid_scsihost_dev_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? 
("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, + 0, tr_timeout, tr_method); +out: + sdev_printk(KERN_INFO, scmd->device, + "device reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_target_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, + "attempting target reset! 
scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, + "target been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + 0, tr_timeout, tr_method); +out: + starget_printk(KERN_INFO, starget, + "target reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_host_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + int r, retval; + + pr_info("%s attempting host reset! scmd(0x%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + if (ioc->is_driver_loading || ioc->remove_host) { + pr_info("%s Blocking the host reset\n", + ioc->name); + r = FAILED; + goto out; + } + retval = leapioraid_base_hard_reset_handler( + ioc, FORCE_BIG_HAMMER); + r = (retval < 0) ? 
FAILED : SUCCESS; +out: + pr_info("%s host reset: %s scmd(0x%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), + scmd); + return r; +} + +static void +leapioraid_scsihost_fw_event_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, leapioraid_firmware_event_work); + leapioraid_fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_del_from_list( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_requeue( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event, unsigned long delay) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + if (!fw_event->delayed_work_active) { + fw_event->delayed_work_active = 1; + INIT_DELAYED_WORK(&fw_event->delayed_work, + leapioraid_firmware_event_work_delayed); + } + queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, + msecs_to_jiffies(delay)); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_error_recovery_delete_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work 
*fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +void +leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static struct leapioraid_fw_event_work *dequeue_next_fw_event( + struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct leapioraid_fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct leapioraid_fw_event_work, list); + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + return fw_event; +} + +static void +leapioraid_scsihost_fw_event_cleanup_queue( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + bool rc = false; + + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || + !ioc->firmware_event_thread || in_interrupt()) + return; + + ioc->fw_events_cleanup = 1; + if (ioc->shost_recovery && ioc->current_event) + ioc->current_event->ignore = 1; + while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { + if (fw_event == ioc->current_event && + ioc->current_event->event != + LEAPIORAID_REMOVE_UNRESPONDING_DEVICES) { + ioc->current_event = NULL; + continue; + } + if (fw_event->event == LEAPIORAID_PORT_ENABLE_COMPLETE) { + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET; + ioc->start_scan = 0; + } + if (fw_event->delayed_work_active) + rc = 
cancel_delayed_work_sync(&fw_event->delayed_work); + else + rc = cancel_work_sync(&fw_event->work); + if (rc) + leapioraid_fw_event_work_put(fw_event); + } + ioc->fw_events_cleanup = 0; +} + +static void +leapioraid_scsihost_internal_device_block( + struct scsi_device *sdev, + struct LEAPIORAID_DEVICE + *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + + r = scsi_internal_device_block_nowait(sdev); + if (r == -EINVAL) + sdev_printk(KERN_WARNING, sdev, + "device_block failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); +} + +static void +leapioraid_scsihost_internal_device_unblock(struct scsi_device *sdev, + struct LEAPIORAID_DEVICE + *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_WARNING, sdev, + "device_unblock and setting to running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 0; + + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r == -EINVAL) { + sdev_printk(KERN_WARNING, sdev, + "device_unblock failed with return(%d)\n\t\t" + "for handle(0x%04x) performing a block followed by an unblock\n", + r, + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + r = scsi_internal_device_block_nowait(sdev); + if (r) + sdev_printk(KERN_WARNING, sdev, + "retried device_block failed with return(%d)\n\t\t" + "for handle(0x%04x)\n", + r, + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 0; + + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r) + sdev_printk(KERN_WARNING, sdev, + "retried device_unblock failed\n\t\t" + "with return(%d) for handle(0x%04x)\n", + r, + sas_device_priv_data->sas_target->handle); + } +} + +static void +leapioraid_scsihost_ublock_io_all_device( + struct LEAPIORAID_ADAPTER *ioc, u8 no_turs) +{ + struct LEAPIORAID_DEVICE 
*sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target; + enum device_responsive_state rc; + struct scsi_device *sdev; + struct leapioraid_sas_device *sas_device = NULL; + int count; + u8 tr_timeout = 30; + u8 tr_method = 0; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target || sas_target->deleted) + continue; + if (!sas_device_priv_data->block) + continue; + count = 0; + if (no_turs) { + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + leapioraid_scsihost_internal_device_unblock(sdev, + sas_device_priv_data); + continue; + } + do { + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, + sas_target->handle, + 0, + (sas_target->flags + & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, + tr_timeout, + tr_method); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + ssleep(1); + } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + && count++ < 144); + sas_device_priv_data->block = 0; + if (rc != DEVICE_READY) + sas_device_priv_data->deleted = 1; + leapioraid_scsihost_internal_device_unblock( + sdev, sas_device_priv_data); + if (rc != DEVICE_READY) { + sdev_printk(KERN_WARNING, sdev, + "%s: device_offlined, handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + scsi_device_set_state(sdev, SDEV_OFFLINE); + sas_device = leapioraid_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (sas_device) { + leapioraid_scsihost_display_enclosure_chassis_info( + NULL, + sas_device, + sdev, + NULL); + leapioraid_sas_device_put(sas_device); + } + } else + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + 
sas_device_priv_data->sas_target->handle); + } +} + +static void +leapioraid_scsihost_ublock_io_device_wait( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target; + enum device_responsive_state rc; + struct scsi_device *sdev; + int count, host_reset_completion_count; + struct leapioraid_sas_device *sas_device; + u8 tr_timeout = 30; + u8 tr_method = 0; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target) + continue; + if (sas_target->sas_address != sas_address || + sas_target->port != port) + continue; + if (sdev->sdev_state == SDEV_OFFLINE) { + sas_device_priv_data->block = 1; + sas_device_priv_data->deleted = 0; + scsi_device_set_state(sdev, SDEV_RUNNING); + scsi_internal_device_block_nowait(sdev); + } + } + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target = sas_device_priv_data->sas_target; + if (!sas_target) + continue; + if (sas_target->sas_address != sas_address || + sas_target->port != port) + continue; + if (!sas_device_priv_data->block) + continue; + count = 0; + do { + host_reset_completion_count = 0; + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, + sas_target->handle, + 0, + (sas_target->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, + tr_timeout, + tr_method); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT + || rc == DEVICE_RETRY_UA) { + do { + msleep(500); + host_reset_completion_count++; + } while (rc == DEVICE_RETRY && + ioc->shost_recovery); + if (host_reset_completion_count > 1) { + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, sas_target->handle, 0, + (sas_target->flags + & 
LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT), + sdev->lun, tr_timeout, tr_method); + if (rc == DEVICE_RETRY + || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT + || rc == DEVICE_RETRY_UA) + msleep(500); + } + continue; + } + } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + && count++ <= 144); + sas_device_priv_data->block = 0; + if (rc != DEVICE_READY) + sas_device_priv_data->deleted = 1; + + scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + + if (rc != DEVICE_READY) { + sdev_printk(KERN_WARNING, sdev, + "%s: device_offlined, handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + sas_device = + leapioraid_get_sdev_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) { + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + sdev, + NULL); + leapioraid_sas_device_put(sas_device); + } + scsi_device_set_state(sdev, SDEV_OFFLINE); + } else { + sdev_printk(KERN_WARNING, sdev, + "device_unblocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + } + } +} + +static void +leapioraid_scsihost_ublock_io_device( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address || + sas_device_priv_data->sas_target->port != port) + continue; + if (sas_device_priv_data->block) { + leapioraid_scsihost_internal_device_unblock(sdev, + sas_device_priv_data); + } + scsi_device_set_state(sdev, SDEV_OFFLINE); + } +} + +static void leapioraid_scsihost_block_io_all_device( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + 
shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + continue; + } + leapioraid_scsihost_internal_device_block( + sdev, sas_device_priv_data); + } +} + +static void +leapioraid_scsihost_block_io_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + struct leapioraid_sas_device *sas_device; + + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle != handle) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device && sas_device->pend_sas_rphy_add) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, + sas_device_priv_data->sas_target->handle); + continue; + } + leapioraid_scsihost_internal_device_block( + sdev, sas_device_priv_data); + } + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static void +leapioraid_scsihost_block_io_to_children_attached_to_ex( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + struct leapioraid_sas_port *leapioraid_port; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_sas_node *expander_sibling; + unsigned long flags; + + if (!sas_expander) + return; + list_for_each_entry(leapioraid_port, + &sas_expander->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = 
__leapioraid_get_sdev_by_addr(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + if (sas_device) { + set_bit(sas_device->handle, + ioc->blocking_handles); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + } + list_for_each_entry(leapioraid_port, + &sas_expander->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + leapioraid_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) { + expander_sibling = + leapioraid_scsihost_expander_find_by_sas_address + (ioc, leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + leapioraid_scsihost_block_io_to_children_attached_to_ex( + ioc, expander_sibling); + } + } +} + +static void +leapioraid_scsihost_block_io_to_children_attached_directly( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) + leapioraid_scsihost_block_io_device(ioc, handle); + } +} + +static void +leapioraid_scsihost_tm_tr_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + struct leapioraid_sas_device *sas_device = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data = NULL; + u64 sas_address = 0; + unsigned long flags; + struct leapioraid_tr_list *delayed_tr; + u32 ioc_state; + struct leapioraid_hba_port *port = NULL; + u8 tr_method = 0; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } + ioc_state = 
leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } + if (test_bit(handle, ioc->pd_handles)) + return; + clear_bit(handle, ioc->pend_os_device_add); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device && sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = sas_device->sas_address; + port = sas_device->port; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + if (sas_target_priv_data) { + dewtprintk(ioc, pr_err( + "%s %s: setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, handle, + (unsigned long long)sas_address)); + if (sas_device) { + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, + sas_device, + NULL, + NULL)); + } + leapioraid_scsihost_ublock_io_device(ioc, sas_address, port); + sas_target_priv_data->handle = + LEAPIORAID_INVALID_DEVICE_HANDLE; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + goto out; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + goto out; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + 
mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + set_bit(handle, ioc->device_remove_in_progress); + ioc->put_smid_hi_priority(ioc, smid, 0); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static u8 +leapioraid_scsihost_tm_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request_tm; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + struct LeapioraidSasIoUnitControlReq_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + struct leapioraid_sc_list *delayed_sc; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery\n", __func__, + ioc->name)); + return 1; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err( + "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + dewtprintk(ioc, pr_err( + "%s tr_complete: handle(0x%04x), (open) smid(%d),\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + ioc->name, + handle, + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + smid_sas_ctrl = + leapioraid_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + delayed_sc = 
kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
		/* no internal smid free: queue the SAS IO-unit-control
		 * (REMOVE_DEVICE) so it is issued when a smid is released
		 */
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc, pr_err(
			"%s DELAYED:sc:handle(0x%04x), (open)\n",
			ioc->name, handle));
		return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
	}
	dewtprintk(ioc, pr_info(
		"%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid_sas_ctrl);
	/* NOTE(review): mpi_request is declared as
	 * struct LeapioraidSasIoUnitControlReq_t * but is cleared with
	 * sizeof(struct LeapioraidIoUnitControlReq_t) - confirm the two
	 * request layouts are the same size
	 */
	memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
	mpi_request->Operation = LEAPIORAID_CTRL_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);
	return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
}

/*
 * leapioraid_scsihost_allow_scmd_to_device - may @scmd be sent to the HBA?
 * @ioc: per-adapter object
 * @scmd: scsi command from the midlayer
 *
 * Rejects all commands during PCI error recovery or when the adapter is
 * over temperature.  While the host is being removed, only cache flushes
 * (SYNCHRONIZE_CACHE) and START_STOP are allowed through - and nothing at
 * all once the PCI device is physically gone.
 *
 * Return: true when the command may be issued, false to fail it.
 */
inline bool
leapioraid_scsihost_allow_scmd_to_device(
	struct LEAPIORAID_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	if (ioc->pci_error_recovery)
		return false;
	if (ioc->adapter_over_temp)
		return false;
	if (ioc->remove_host) {
		if (leapioraid_base_pci_device_is_unplugged(ioc))
			return false;
		switch (scmd->cmnd[0]) {
		case SYNCHRONIZE_CACHE:
		case START_STOP:
			return true;
		default:
			return false;
		}
	}
	return true;
}

/*
 * leapioraid_scsihost_sas_control_complete - SAS IO-unit-control completion
 * @ioc: per-adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame address
 *
 * Completion callback for the REMOVE_DEVICE control request issued after a
 * target reset (body continues below).
 */
static u8
leapioraid_scsihost_sas_control_complete(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	struct LeapioraidDefaultRep_t *mpi_reply =
		leapioraid_base_get_reply_virt_addr(ioc, reply);
	u16 dev_handle;

	if (likely(mpi_reply)) {
		dev_handle
			= ((struct LeapioraidIoUnitControlRep_t *)mpi_reply)->DevHandle;
		dewtprintk(ioc, pr_err(
			"%s sc_complete:handle(0x%04x), (open) smid(%d),\n\t\t"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			ioc->name,
			le16_to_cpu(dev_handle),
			smid,
le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + if (le16_to_cpu(mpi_reply->IOCStatus) == + LEAPIORAID_IOCSTATUS_SUCCESS) { + clear_bit(le16_to_cpu(dev_handle), + ioc->device_remove_in_progress); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + } else if (ioc->tm_tr_retry[le16_to_cpu(dev_handle)] < 3) { + dewtprintk(ioc, pr_err( + "%s re-initiating tm_tr_send:handle(0x%04x)\n", + ioc->name, + le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)]++; + leapioraid_scsihost_tm_tr_send(ioc, le16_to_cpu(dev_handle)); + } else { + dewtprintk(ioc, pr_err( + "%s Exiting out of tm_tr_send retries:handle(0x%04x)\n", + ioc->name, + le16_to_cpu(dev_handle))); + ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0; + clear_bit(le16_to_cpu(dev_handle), + ioc->device_remove_in_progress); + } + } else { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + } + return leapioraid_check_for_pending_internal_cmds(ioc, smid); +} + +static void +leapioraid_scsihost_tm_tr_volume_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + struct leapioraid_tr_list *delayed_tr; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host reset in progress!\n", __func__, ioc->name)); + return; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_volume_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct 
LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + ioc->put_smid_hi_priority(ioc, smid, 0); +} + +static u8 +leapioraid_scsihost_tm_volume_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request_tm; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host reset in progress!\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err( + "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + dewtprintk(ioc, pr_err( + "%s tr_complete:handle(0x%04x), (open) smid(%d),\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + ioc->name, + handle, + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); +} + +static void +leapioraid_scsihost_tm_internal_tr_send( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_tr_list *delayed_tr; + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid; + u8 tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_internal_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + 
INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, + &ioc->delayed_internal_tm_list); + dewtprintk(ioc, + pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + dewtprintk(ioc, pr_info( + "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_tr_internal_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpi_request->MsgFlags = tr_method; + ioc->put_smid_hi_priority(ioc, smid, 0); +} + +static u8 +leapioraid_scsihost_tm_internal_tr_complete( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + if (likely(mpi_reply)) { + dewtprintk(ioc, pr_err( + "%s tr_complete:handle(0x%04x),\n\t\t" + "(open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->DevHandle), + smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } else { + pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return 1; + } + return leapioraid_scsihost_check_for_pending_tm(ioc, smid); +} + +static void +leapioraid_scsihost_issue_delayed_event_ack( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + U16 event, U32 event_context) +{ + struct LeapioraidEventAckReq_t *ack_request; + int i = smid - ioc->internal_smid; + unsigned long flags; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dewtprintk(ioc, pr_info( + "%s EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", + ioc->name, 
le16_to_cpu(event), + smid, ioc->base_cb_idx)); + ack_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t)); + ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK; + ack_request->Event = event; + ack_request->EventContext = event_context; + ack_request->VF_ID = 0; + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); +} + +static void +leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl( + struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct LeapioraidSasIoUnitControlReq_t *mpi_request; + u32 ioc_state; + int i = smid - ioc->internal_smid; + unsigned long flags; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info( + "%s %s: host has been removed\n", __func__, ioc->name)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info( + "%s %s: host in pci error recovery\n", __func__, + ioc->name)); + return; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info( + "%s %s: host is not operational\n", __func__, ioc->name)); + return; + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dewtprintk(ioc, pr_info( + "%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, + smid, ioc->tm_sas_control_cb_idx)); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL; + mpi_request->Operation = 0x0D; + mpi_request->DevHandle = cpu_to_le16(handle); + ioc->put_smid_default(ioc, smid); +} + +u8 +leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + struct leapioraid_sc_list *delayed_sc; + struct leapioraid_event_ack_list *delayed_event_ack; + + if 
(!list_empty(&ioc->delayed_event_ack_list)) {
		/* delayed EVENT_ACKs take priority over delayed
		 * sas-io-unit-control requests
		 */
		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
					       struct leapioraid_event_ack_list, list);
		leapioraid_scsihost_issue_delayed_event_ack(ioc, smid,
							    delayed_event_ack->Event,
							    delayed_event_ack->EventContext);
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
		return 0;
	}
	if (!list_empty(&ioc->delayed_sc_list)) {
		delayed_sc = list_entry(ioc->delayed_sc_list.next,
					struct leapioraid_sc_list, list);
		leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(ioc, smid,
								   delayed_sc->handle);
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
		return 0;
	}
	/* nothing queued: caller may free the smid itself */
	return 1;
}

/*
 * leapioraid_scsihost_check_for_pending_tm - kick the next delayed target reset
 * @ioc: per-adapter object
 * @smid: just-completed hi-priority system request message index
 *
 * Frees @smid and reuses the slot for the oldest queued task-management
 * request, checking the queues in priority order: volume resets, then
 * device resets, then internal resets.
 *
 * Return: 0 when a delayed request was issued (smid consumed/freed here),
 *         1 when no request was pending (caller owns the smid).
 */
static u8
leapioraid_scsihost_check_for_pending_tm(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	struct leapioraid_tr_list *delayed_tr;

	if (!list_empty(&ioc->delayed_tr_volume_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
					struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_tr_volume_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	if (!list_empty(&ioc->delayed_tr_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_list.next,
					struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_tr_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	if (!list_empty(&ioc->delayed_internal_tm_list)) {
		delayed_tr = list_entry(ioc->delayed_internal_tm_list.next,
					struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_internal_tr_send(
			ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	return 1;
}

/*
 * leapioraid_scsihost_check_topo_delete_events - handle topology delete events
 * @ioc: per-adapter object
 * @event_data: SAS topology change list event payload from firmware
 *
 * Issues target resets for not-responding attached devices and blocks I/O
 * to children of delayed-not-responding expanders (body continues below).
 */
static void
leapioraid_scsihost_check_topo_delete_events(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataSasTopoChangeList_t *event_data)
{
	struct leapioraid_fw_event_work *fw_event;
	struct
LeapioraidEventDataSasTopoChangeList_t *local_event_data; + u16 expander_handle; + struct leapioraid_raid_sas_node *sas_expander; + unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + leapioraid_scsihost_tm_tr_send(ioc, handle); + } + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + leapioraid_scsihost_block_io_to_children_attached_directly( + ioc, event_data); + return; + } + if (event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_handle( + ioc, expander_handle); + leapioraid_scsihost_block_io_to_children_attached_to_ex( + ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + leapioraid_scsihost_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING) + leapioraid_scsihost_block_io_to_children_attached_directly( + ioc, event_data); + if (event_data->ExpStatus != LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = fw_event->event_data; + if (local_event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING) 
{ + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, pr_err( + "%s setting ignoring flag\n", + ioc->name)); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_set_volume_delete_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_raid_device_find_by_handle( + ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, pr_err( + "%s setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, handle, + (unsigned long long)raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_set_volume_handle_for_tr( + u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +static void +leapioraid_scsihost_check_ir_config_unhide_events( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrCfgChangeList_t *event_data) +{ + struct LeapioraidEventIrCfgEle_t *element; + int i; + u16 handle, volume_handle, a, b; + struct leapioraid_tr_list *delayed_tr; + + a = 0; + b = 0; + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + 
leapioraid_scsihost_set_volume_delete_flag(ioc, volume_handle); + leapioraid_scsihost_set_volume_handle_for_tr( + volume_handle, &a, &b); + } + } + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + leapioraid_scsihost_set_volume_handle_for_tr( + volume_handle, &a, &b); + } + } + if (a) + leapioraid_scsihost_tm_tr_volume_send(ioc, a); + if (b) + leapioraid_scsihost_tm_tr_volume_send(ioc, b); + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + leapioraid_scsihost_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_err( + "%s DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + } else + leapioraid_scsihost_tm_tr_send(ioc, handle); + } +} + +static void +leapioraid_scsihost_check_volume_delete_events( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrVol_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == LEAPIORAID_RAID_VOL_STATE_MISSING || state == + LEAPIORAID_RAID_VOL_STATE_FAILED) + 
leapioraid_scsihost_set_volume_delete_flag(
			ioc, le16_to_cpu(event_data->VolDevHandle));
}

/*
 * leapioraid_scsihost_set_satl_pending - serialize ATA passthrough commands
 * @scmd: scsi command
 * @pending: true to try claiming the per-device ATA slot, false to release it
 *
 * Only ATA_12/ATA_16 passthrough CDBs are tracked; everything else passes.
 *
 * Return: nonzero when @pending is true and another ATA passthrough command
 * is already outstanding on this device (caller must retry later), else 0.
 */
static int
leapioraid_scsihost_set_satl_pending(
	struct scsi_cmnd *scmd, bool pending)
{
	struct LEAPIORAID_DEVICE *priv = scmd->device->hostdata;

	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;
	if (pending)
		return test_and_set_bit(LEAPIORAID_CMND_PENDING_BIT,
					&priv->ata_command_pending);
	clear_bit(LEAPIORAID_CMND_PENDING_BIT, &priv->ata_command_pending);
	return 0;
}

/*
 * leapioraid_scsihost_flush_running_cmds - fail back all outstanding commands
 * @ioc: per-adapter object
 *
 * Walks every possible smid, completing each in-flight command back to the
 * midlayer: DID_NO_CONNECT when the adapter is unavailable / resetting with
 * error / over temperature / being removed, otherwise DID_RESET so the
 * midlayer requeues it.  Used in the reset and removal paths.
 */
void
leapioraid_scsihost_flush_running_cmds(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct leapioraid_scsiio_tracker *st;
	u16 smid;
	u16 count = 0;

	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
		scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		st = leapioraid_base_scsi_cmd_priv(scmd);
		/* tracker with smid 0 means the command was never actually
		 * issued to the firmware; leave it alone
		 */
		if (st && st->smid == 0)
			continue;
		leapioraid_scsihost_set_satl_pending(scmd, false);
		leapioraid_base_get_msg_frame(ioc, smid);
		scsi_dma_unmap(scmd);

		leapioraid_base_clear_st(ioc, st);
		if ((!leapioraid_base_pci_device_is_available(ioc)) ||
		    (ioc->ioc_reset_status != 0)
		    || ioc->adapter_over_temp || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, pr_info("%s completing %d cmds\n",
			       ioc->name, count));
}

/* true when the request carries the real-time I/O priority class */
static inline u8 scsih_is_io_belongs_to_RT_class(
	struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);

	return (IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT);
}

/*
 * leapioraid_scsihost_qcmd - SCSI host template queuecommand entry point
 * @shost: SCSI host
 * @scmd: command from the midlayer
 *
 * Builds and posts the firmware SCSI IO request for @scmd
 * (body continues below this hunk).
 */
static int
leapioraid_scsihost_qcmd(
	struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct LEAPIORAID_ADAPTER *ioc
		= leapioraid_shost_private(scmd->device->host);
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct LEAPIORAID_TARGET *sas_target_priv_data;
	struct LeapioraidSCSIIOReq_t *mpi_request;
	u32 mpi_control;
	u16 smid;
	u16 handle;
	int rc = 0;

	if
(ioc->logging_level & LEAPIORAID_DEBUG_SCSI) + scsi_print_command(scmd); + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (!(leapioraid_scsihost_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (sas_device_priv_data->block && + scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, + 0x29, 0x07); + scsi_done(scmd); + goto out; + } + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } else if (sas_target_priv_data->deleted || + sas_device_priv_data->deleted) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } else if (sas_target_priv_data->tm_busy || sas_device_priv_data->block) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + do { + if (test_bit(LEAPIORAID_CMND_PENDING_BIT, + &sas_device_priv_data->ata_command_pending)) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + } while (leapioraid_scsihost_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + else + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ; + if (sas_device_priv_data->ncq_prio_enable) { + if (scsih_is_io_belongs_to_RT_class(scmd)) + mpi_control |= 1 << LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT; + } + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + 
scmd->cmd_len != 32) + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_TLR_ON; + smid = leapioraid_base_get_smid_scsiio( + ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (scmd->cmd_len == 32) + mpi_control |= 4 << LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioraidSCSIIOReq_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + if (mpi_request->DataLength) { + if (ioc->build_sg_scmd(ioc, scmd, smid)) { + leapioraid_base_free_smid(ioc, smid); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 0x4000); + ioc->put_smid_fast_path(ioc, smid, handle); + } else + ioc->put_smid_scsi_io(ioc, smid, + 
le16_to_cpu(mpi_request->DevHandle)); + } else + ioc->put_smid_default(ioc, smid); +out: + return rc; +} + +static void +leapioraid_scsihost_normalize_sense( + char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +static void +leapioraid_scsihost_scsi_ioc_info( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct LeapioraidSCSIIORep_t *mpi_reply, u16 smid, + u8 scsi_status, u16 error_response_count) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct leapioraid_sas_device *sas_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + if (log_info == 0x31170000) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case 
LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp guard error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp ref tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp app tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + switch (scsi_status) { + case LEAPIORAID_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case LEAPIORAID_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case LEAPIORAID_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case LEAPIORAID_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case 
LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case LEAPIORAID_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + pr_warn("%s \t%s wwid(0x%016llx)\n", + ioc->name, device_str, + (unsigned long long)priv_target->sas_address); + } else { + sas_device = leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + pr_warn( + "%s \t%s: sas_address(0x%016llx), phy(%d)\n", + ioc->name, __func__, (unsigned long long) + sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, + sas_device, + NULL, NULL); + leapioraid_sas_device_put(sas_device); + } + } + pr_warn( + "%s \thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, + ioc_status, smid); + pr_warn("%s \trequest_len(%d), underflow(%d), resid(%d)\n", + ioc->name, scsi_bufflen(scmd), scmd->underflow, + scsi_get_resid(scmd)); + 
pr_warn("%s \ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + pr_warn("%s \tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + pr_warn( + "%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + ioc->name, + data.skey, data.asc, data.ascq, + le32_to_cpu(mpi_reply->SenseCount)); + } + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *) &response_info; + leapioraid_scsihost_response_code(ioc, response_bytes[0]); + } +} + +static void +leapioraid_scsihost_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + struct leapioraid_sas_device *sas_device; + + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + 
le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + leapioraid_sas_device_put(sas_device); +} + +static void +leapioraid_scsihost_turn_off_pfa_led(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +static void +leapioraid_scsihost_send_event_to_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static void +leapioraid_scsihost_smart_predicted_fault( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 from_sata_smart_polling) +{ + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LeapioraidEventNotificationRep_t *event_reply; + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data; + struct 
leapioraid_sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + || ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME))) + goto out_unlock; + leapioraid_scsihost_display_enclosure_chassis_info(NULL, sas_device, NULL, + starget); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (from_sata_smart_polling) + leapioraid_scsihost_send_event_to_turn_on_pfa_led(ioc, handle); + sz = offsetof(struct LeapioraidEventNotificationRep_t, EventData) + + sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_ATOMIC); + if (!event_reply) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + event_reply->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz / 4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t) / 4); + event_data = (struct LeapioraidEventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + leapioraid_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + return; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +static u8 +leapioraid_scsihost_io_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidSCSIIOReq_t 
*mpi_request; + struct LeapioraidSCSIIORep_t *mpi_reply; + struct scsi_cmnd *scmd; + u16 ioc_status, error_response_count = 0; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 response_code = 0; + struct leapioraid_scsiio_tracker *st; + + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (scmd == NULL) + return 1; + leapioraid_scsihost_set_satl_pending(scmd, false); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + st = leapioraid_base_scsi_cmd_priv(scmd); + if (st->direct_io && ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) + != LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED)) { + st->scmd = scmd; + st->direct_io = 0; + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + ioc->put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + scsi_state = mpi_reply->SCSIState; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + response_code == LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_TLR_ON; + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= LEAPIORAID_IOCSTATUS_MASK; + scsi_status = 
mpi_reply->SCSIStatus; + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status == LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN + && xfer_cnt == 0 + && (scsi_status == LEAPIORAID_SCSI_STATUS_BUSY + || scsi_status == LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT + || scsi_status == LEAPIORAID_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = LEAPIORAID_IOCSTATUS_SUCCESS; + } + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = leapioraid_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + if (data.asc == 0x5D) + leapioraid_scsihost_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle), + 0); + } + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_BUSY: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == 0x32010081) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (LEAPIORAID_SCSI_STATE_TERMINATED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + 
scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + if ((scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID)) + break; + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 0, + ILLEGAL_REQUEST, 0x20, + 0); + } + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + case 
LEAPIORAID_IOCSTATUS_INVALID_STATE: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + } + if (scmd->result && (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) + leapioraid_scsihost_scsi_ioc_info( + ioc, scmd, mpi_reply, smid, scsi_status, + error_response_count); +out: + scsi_dma_unmap(scmd); + leapioraid_base_free_smid(ioc, smid); + scsi_done(scmd); + return 0; +} + +static void +leapioraid_scsihost_update_vphys_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_id; + struct LeapioraidSasPhyP0_t phy_pg0; + struct leapioraid_hba_port *port, *port_next, *mport; + struct leapioraid_virtual_phy *vphy, *vphy_next; + struct leapioraid_sas_device *sas_device; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, + list) { + vphy->flags |= LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + } + } + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + if 
(!(le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP)) + continue; + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + found = 0; + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, + list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (! + (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY)) + continue; + if (vphy->sas_address != attached_sas_addr) + continue; + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = + leapioraid_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = + kzalloc(sizeof(struct leapioraid_hba_port), + GFP_KERNEL); + if (!mport) { + pr_err( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, + __LINE__, __func__); + break; + } + mport->port_id = port_id; + pr_err( + "%s %s: hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, __func__, mport, + mport->port_id); + list_add_tail(&mport->list, + &ioc->port_table_list); + } + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD(&mport->vphys_list); + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, + &mport->vphys_list); + sas_device = + leapioraid_get_sdev_by_addr(ioc, + attached_sas_addr, + port); + if (sas_device) + sas_device->port = mport; + } + if (mport->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) { + 
mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= + ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } + vphy->flags &= ~LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + +static u8 +leapioraid_scsihost_get_port_table_after_reset( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_count = 0, port_id; + + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return port_count; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if ((port_table[j].port_id == port_id) && + (port_table[j].sas_address == attached_sas_addr)) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + if (found) + continue; + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + 
port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; +static int +leapioraid_scsihost_look_and_get_matched_port_entry( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_entry, + struct leapioraid_hba_port **matched_port_entry, + int *count) +{ + struct leapioraid_hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; + int lcount = 0; + + *matched_port_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT)) + continue; + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask == port_entry->phy_mask)) { + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; + break; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask) + && (port_table_entry->port_id == port_entry->port_id)) { + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; + continue; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask)) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; + continue; + } + if (port_table_entry->sas_address == port_entry->sas_address) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; 
+ lcount++; + } + } + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) + *count = lcount; + return matched_code; +} + +static void +leapioraid_scsihost_del_phy_part_of_anther_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) { + if (i == index) + continue; + if (port_table[i].phy_mask & (1 << offset)) { + leapioraid_transport_del_phy_from_an_existing_port( + ioc, + sas_node, + &sas_node->phy + [offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +static void +leapioraid_scsihost_add_or_del_phys_from_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *hba_port_entry, + struct leapioraid_hba_port *port_table, + int index, u8 port_count) +{ + u32 phy_mask, offset = 0; + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + leapioraid_scsihost_del_phy_part_of_anther_port( + ioc, + port_table, + index, + port_count, + offset); + } else { +#if defined(LEAPIORAID_WIDE_PORT_API) + if (sas_node->phy[offset].phy_belongs_to_port) + leapioraid_transport_del_phy_from_an_existing_port + (ioc, sas_node, + &sas_node->phy[offset]); + leapioraid_transport_add_phy_to_an_existing_port + (ioc, sas_node, &sas_node->phy[offset], + hba_port_entry->sas_address, + hba_port_entry); +#endif + } + } + } +} + +static void +leapioraid_scsihost_del_dirty_vphy(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_hba_port *port, *port_next; + struct leapioraid_virtual_phy *vphy, *vphy_next; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if 
(!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, + list) { + if (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY) { + drsprintk(ioc, pr_err( + "%s Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", + ioc->name, vphy, + port->port_id, + vphy->phy_mask)); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + if (!port->vphys_mask && !port->sas_address) + port->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } +} + +static void +leapioraid_scsihost_del_dirty_port_entries( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_hba_port *port, *port_next; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!(port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) || + port->flags & LEAPIORAID_HBA_PORT_FLAG_NEW_PORT) + continue; + drsprintk(ioc, pr_err( + "%s Deleting port table entry %p having Port id: %d\t, Phy_mask 0x%08x\n", + ioc->name, port, port->port_id, + port->phy_mask)); + list_del(&port->list); + kfree(port); + } +} + +static void +leapioraid_scsihost_sas_port_refresh(struct LEAPIORAID_ADAPTER *ioc) +{ + u8 port_count = 0; + struct leapioraid_hba_port *port_table; + struct leapioraid_hba_port *port_table_entry; + struct leapioraid_hba_port *port_entry = NULL; + int i, j, ret, count = 0, lcount = 0; + u64 sas_addr; + u8 num_phys; + + drsprintk(ioc, pr_err( + "%s updating ports for sas_host(0x%016llx)\n", + ioc->name, + (unsigned long long)ioc->sas_hba.sas_address)); + leapioraid_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port_table) + return; + 
port_count = leapioraid_scsihost_get_port_table_after_reset( + ioc, port_table); + if (!port_count) + return; + drsprintk(ioc, + pr_info("%s New Port table\n", ioc->name)); + for (j = 0; j < port_count; j++) + drsprintk(ioc, pr_err( + "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table[j].port_id, + port_table[j].phy_mask, + port_table[j].sas_address)); + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + port_table_entry->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } + drsprintk(ioc, + pr_info("%s Old Port table\n", ioc->name)); + port_table_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + drsprintk(ioc, pr_err( + "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table_entry->port_id, + port_table_entry->phy_mask, + port_table_entry->sas_address)); + } + for (j = 0; j < port_count; j++) { + ret = leapioraid_scsihost_look_and_get_matched_port_entry(ioc, + &port_table[j], + &port_entry, + &count); + if (!port_entry) { + drsprintk(ioc, pr_err( + "%s No Matched entry for sas_addr(0x%16llx), Port:%d\n", + ioc->name, + port_table[j].sas_address, + port_table[j].port_id)); + continue; + } + switch (ret) { + case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: + case MATCHED_WITH_ADDR_AND_SUBPHYMASK: + leapioraid_scsihost_add_or_del_phys_from_existing_port(ioc, + port_entry, + port_table, + j, + port_count); + break; + case MATCHED_WITH_ADDR: + sas_addr = port_table[j].sas_address; + for (i = 0; i < port_count; i++) { + if (port_table[i].sas_address == sas_addr) + lcount++; + } + if ((count > 1) || (lcount > 1)) + port_entry = NULL; + else + leapioraid_scsihost_add_or_del_phys_from_existing_port + (ioc, port_entry, port_table, j, + port_count); + } + if (!port_entry) + continue; + if (port_entry->port_id != port_table[j].port_id) + port_entry->port_id = port_table[j].port_id; + port_entry->flags &= ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + port_entry->phy_mask = 
port_table[j].phy_mask; + } + port_table_entry = NULL; +} + +static +struct leapioraid_virtual_phy *leapioraid_scsihost_alloc_vphy( + struct LEAPIORAID_ADAPTER *ioc, + u8 port_id, u8 phy_num) +{ + struct leapioraid_virtual_phy *vphy; + struct leapioraid_hba_port *port; + + port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!port) + return NULL; + vphy = leapioraid_get_vphy_by_phy(ioc, port, phy_num); + if (!vphy) { + vphy = kzalloc(sizeof(struct leapioraid_virtual_phy), GFP_KERNEL); + if (!vphy) + return NULL; + if (!port->vphys_mask) + INIT_LIST_HEAD(&port->vphys_list); + port->vphys_mask |= (1 << phy_num); + vphy->phy_mask |= (1 << phy_num); + list_add_tail(&vphy->list, &port->vphys_list); + pr_info( + "%s vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", + ioc->name, vphy, port->port_id, phy_num); + } + return vphy; +} + +static void +leapioraid_scsihost_sas_host_refresh(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 sz; + u16 ioc_status; + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u8 link_rate, port_id; + struct leapioraid_hba_port *port; + struct LeapioraidSasPhyP0_t phy_pg0; + + dtmprintk(ioc, pr_err( + "%s updating handles for sas_host(0x%016llx)\n", + ioc->name, + (unsigned long long)ioc->sas_hba.sas_address)); + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate 
>> 4; + if (i == 0) + ioc->sas_hba.handle = + le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + pr_info( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + if (ioc->shost_recovery) + port->flags = LEAPIORAID_HBA_PORT_FLAG_NEW_PORT; + list_add_tail(&port->list, &ioc->port_table_list); + } + if (le32_to_cpu + (sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP + && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (attached_handle + && link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + link_rate = LEAPIORAID_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + if (!ioc->sas_hba.phy[i].phy) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + leapioraid_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], + phy_pg0, 
+ ioc->sas_hba.parent_dev); + continue; + } + leapioraid_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate, + ioc->sas_hba.phy[i].port); + } +out: + kfree(sas_iounit_pg0); +} + +static void +leapioraid_scsihost_sas_host_add(struct LEAPIORAID_ADAPTER *ioc) +{ + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidSasEncP0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + u8 num_phys, port_id; + struct leapioraid_hba_port *port; + + leapioraid_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.nr_phys_allocated = + max_t(u8, LEAPIORAID_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = + kcalloc(ioc->sas_hba.nr_phys_allocated, + sizeof(struct leapioraid_sas_phy), + GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sz = offsetof(struct 
LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->io_missing_delay = sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) + * 16; + else + ioc->device_missing_delay = device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if (i == 0) + ioc->sas_hba.handle = + le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + pr_info( + "%s hba_port entry: 
%p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + } + if ((le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY) + && (phy_pg0.NegotiatedLinkRate >> 4) >= + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) { + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + leapioraid_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, + ioc->sas_hba.parent_dev); + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + pr_info( + "%s host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + ioc->sas_hba.handle, + (unsigned long long)ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); + if (ioc->sas_hba.enclosure_handle) { + if (!(leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } +out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +static int +leapioraid_scsihost_expander_add( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_enclosure_node *enclosure_dev; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidExpanderP1_t expander_pg1; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, 
sas_address_parent = 0; + int i; + unsigned long flags; + u8 port_id; + struct leapioraid_sas_port *leapioraid_port = NULL; + int rc = 0; + + if (!handle) + return -1; + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + if ((leapioraid_config_get_expander_pg0( + ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (leapioraid_scsihost_get_sas_address( + ioc, parent_handle, &sas_address_parent) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + port_id = expander_pg0.PhysicalPort; + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = + leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address_parent, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = leapioraid_scsihost_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + return 0; + sas_expander = kzalloc(sizeof(struct leapioraid_raid_sas_node), + GFP_KERNEL); + if (!sas_expander) + return -1; + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = 
sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_expander->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + pr_info( + "%s expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + handle, parent_handle, + (unsigned long long)sas_expander->sas_address, + sas_expander->num_phys); + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct leapioraid_sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + rc = -1; + goto out_fail; + } + INIT_LIST_HEAD(&sas_expander->sas_port_list); + leapioraid_port = leapioraid_transport_port_add( + ioc, handle, + sas_address_parent, + sas_expander->port); + if (!leapioraid_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &leapioraid_port->rphy->dev; + sas_expander->rphy = leapioraid_port->rphy; + for (i = 0; i < sas_expander->num_phys; i++) { + if ((leapioraid_config_get_expander_pg1( + ioc, &mpi_reply, + &expander_pg1, i, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + if ((leapioraid_transport_add_expander_phy + (ioc, &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + if (sas_expander->enclosure_handle) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle( + ioc, + sas_expander->enclosure_handle); + if (enclosure_dev) + sas_expander->enclosure_logical_id = + 
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + } + leapioraid_scsihost_expander_node_add(ioc, sas_expander); + return 0; +out_fail: + if (leapioraid_port) + leapioraid_transport_port_remove(ioc, + sas_expander->sas_address, + sas_address_parent, + sas_expander->port); + kfree(sas_expander); + return rc; +} + +void +leapioraid_expander_remove( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_raid_sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_sas_address( + ioc, sas_address, port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + leapioraid_scsihost_expander_node_remove( + ioc, sas_expander); +} + +static u8 +leapioraid_scsihost_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (ioc->scsih_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->scsih_cmds.smid != smid) + return 1; + ioc->scsih_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->scsih_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->scsih_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->scsih_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->scsih_cmds.done); + return 1; +} + +static int +leapioraid_scsi_send_scsi_io( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsi_io_transfer *transfer_packet, + u8 tr_timeout, u8 tr_method) +{ + struct LeapioraidSCSIIORep_t *mpi_reply; + struct LeapioSCSIIOReq_t *mpi_request; + u16 smid; + u8 issue_reset = 0; + int rc; + void *priv_sense; + u32 mpi_control; + void *psge; + dma_addr_t data_out_dma = 0; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + size_t data_out_sz = 0; + u16 handle; + u8 
retry_count = 0, host_reset_count = 0; + int tm_return_code; + + if (ioc->pci_error_recovery) { + pr_err("%s %s: pci error recovery in progress!\n", + ioc->name, __func__); + return -EFAULT; + } + if (ioc->shost_recovery) { + pr_info("%s %s: host recovery in progress!\n", + ioc->name, __func__); + return -EAGAIN; + } + handle = transfer_packet->handle; + if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) { + pr_info("%s %s: no device!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: scsih_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } +retry_loop: + if (test_bit(handle, ioc->device_remove_in_progress)) { + pr_info("%s %s: device removal in progress\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = ioc->shost->can_queue + + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioSCSIIOReq_t)); + if (transfer_packet->is_raid) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + switch (transfer_packet->dir) { + case DMA_TO_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + data_out_dma = transfer_packet->data_dma; + data_out_sz = transfer_packet->data_length; + break; + case DMA_FROM_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + data_in_dma = transfer_packet->data_dma; + data_in_sz = transfer_packet->data_length; + break; + case DMA_BIDIRECTIONAL: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL; + BUG(); + break; + default: + case DMA_NONE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + 
break; + } + psge = &mpi_request->SGL; + ioc->build_sg( + ioc, psge, data_out_dma, + data_out_sz, data_in_dma, + data_in_sz); + mpi_request->Control = cpu_to_le32(mpi_control | + LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ); + mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + priv_sense = leapioraid_base_get_sense_buffer(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioSCSIIOReq_t, SGL) / 4; + mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length); + int_to_scsilun(transfer_packet->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb, + transfer_packet->cdb_length); + init_completion(&ioc->scsih_cmds.done); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) + ioc->put_smid_scsi_io(ioc, smid, handle); + else + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + transfer_packet->timeout * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof(struct LeapioSCSIIOReq_t) / 4, + issue_reset); + goto issue_target_reset; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + transfer_packet->valid_reply = 1; + mpi_reply = ioc->scsih_cmds.reply; + transfer_packet->sense_length = + le32_to_cpu(mpi_reply->SenseCount); + if (transfer_packet->sense_length) + memcpy(transfer_packet->sense, priv_sense, + transfer_packet->sense_length); + transfer_packet->transfer_length = + le32_to_cpu(mpi_reply->TransferCount); + transfer_packet->ioc_status = + le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + transfer_packet->scsi_state = mpi_reply->SCSIState; + transfer_packet->scsi_status = mpi_reply->SCSIStatus; + 
transfer_packet->log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + } + goto out; +issue_target_reset: + if (issue_reset) { + pr_info("%s issue target reset: handle(0x%04x)\n", ioc->name, handle); + tm_return_code = + leapioraid_scsihost_issue_locked_tm(ioc, handle, + 0xFFFFFFFF, 0xFFFFFFFF, + 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + smid, tr_timeout, + tr_method); + if (tm_return_code == SUCCESS) { + pr_err( + "%s target reset completed: handle (0x%04x)\n", + ioc->name, handle); + if (((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) + && retry_count++ < 3) + || ((ioc->scsih_cmds.status & LEAPIORAID_CMD_RESET) + && host_reset_count++ == 0)) { + pr_info("%s issue retry: handle (0x%04x)\n", + ioc->name, handle); + goto retry_loop; + } + } else + pr_err("%s target reset didn't complete: handle(0x%04x)\n", + ioc->name, handle); + rc = -EFAULT; + } else + rc = -EAGAIN; +out: + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_determine_disposition( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsi_io_transfer *transfer_packet) +{ + static enum device_responsive_state rc; + struct sense_info sense_info = { 0, 0, 0 }; + u8 check_sense = 0; + char *desc = NULL; + + if (!transfer_packet->valid_reply) + return DEVICE_READY; + switch (transfer_packet->ioc_status) { + case LEAPIORAID_IOCSTATUS_BUSY: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + rc = DEVICE_RETRY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + if (transfer_packet->log_info == 0x31170000) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->cdb[0] == REPORT_LUNS) + rc = DEVICE_READY; + else + rc = DEVICE_RETRY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + case 
LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SUCCESS: + if (!transfer_packet->scsi_state && + !transfer_packet->scsi_status) { + rc = DEVICE_READY; + break; + } + if (transfer_packet->scsi_state & + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + rc = DEVICE_ERROR; + check_sense = 1; + break; + } + if (transfer_packet->scsi_state & + (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS | + LEAPIORAID_SCSI_STATE_TERMINATED)) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->scsi_status >= LEAPIORAID_SCSI_STATUS_BUSY) { + rc = DEVICE_RETRY; + break; + } + rc = DEVICE_READY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + if (transfer_packet->scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + rc = DEVICE_RETRY; + else + rc = DEVICE_ERROR; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + default: + rc = DEVICE_ERROR; + break; + } + if (check_sense) { + leapioraid_scsihost_normalize_sense( + transfer_packet->sense, &sense_info); + if (sense_info.skey == UNIT_ATTENTION) + rc = DEVICE_RETRY_UA; + else if (sense_info.skey == NOT_READY) { + if (sense_info.asc == 0x3a) + rc = DEVICE_READY; + else if (sense_info.asc == 0x04) { + if (sense_info.ascq == 0x03 || + sense_info.ascq == 0x0b || + sense_info.ascq == 0x0c) { + rc = DEVICE_ERROR; + } else + rc = DEVICE_START_UNIT; + } else if (sense_info.asc == 0x3e && !sense_info.ascq) + rc = DEVICE_START_UNIT; + } else if (sense_info.skey == ILLEGAL_REQUEST && + transfer_packet->cdb[0] == REPORT_LUNS) { + rc = DEVICE_READY; + } else if (sense_info.skey == MEDIUM_ERROR) { + if (sense_info.asc == 0x31) + rc = DEVICE_READY; + } else if (sense_info.skey == HARDWARE_ERROR) { + if (sense_info.asc == 0x19) + rc = DEVICE_READY; + } + } + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) { + switch (rc) { + case DEVICE_READY: + desc = "ready"; + break; + case DEVICE_RETRY: + desc = "retry"; + break; + case DEVICE_RETRY_UA: + desc = "retry_ua"; + break; + 
case DEVICE_START_UNIT: + desc = "start_unit"; + break; + case DEVICE_STOP_UNIT: + desc = "stop_unit"; + break; + case DEVICE_ERROR: + desc = "error"; + break; + } + pr_info( + "%s \tioc_status(0x%04x), loginfo(0x%08x),\n\t\t" + "scsi_status(0x%02x), scsi_state(0x%02x), rc(%s)\n", + ioc->name, + transfer_packet->ioc_status, + transfer_packet->log_info, + transfer_packet->scsi_status, + transfer_packet->scsi_state, + desc); + if (check_sense) + pr_info("%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x]\n", + ioc->name, + sense_info.skey, sense_info.asc, + sense_info.ascq); + } + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_sn( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 **serial_number) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u8 *inq_data; + int return_code; + u32 data_length; + u8 len; + u8 tr_timeout = 30; + u8 tr_method = 0; + + inq_data = NULL; + transfer_packet + = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + data_length = 252; + inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!inq_data) { + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[2] = 0x80; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 30; + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = 
leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) { + len = strlen(&inq_data[4]) + 1; + *serial_number = kmalloc(len, GFP_KERNEL); + if (*serial_number) + strscpy(*serial_number, &inq_data[4], len); /* bound by the allocated length; sizeof(*serial_number) is only the pointer size and truncated the serial number */ + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (inq_data) + dma_free_coherent(&ioc->pdev->dev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_supported_pages( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u32 lun, void *data, + u32 data_length) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *inq_data; + int return_code; + + inq_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!inq_data) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->lun = lun; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) + memcpy(data, inq_data, data_length); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s 
failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (inq_data) + dma_free_coherent(&ioc->pdev->dev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_report_luns( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, void *data, + u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, + u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *lun_data; + int return_code; + int retries; + + lun_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!lun_data) { + rc = DEVICE_RETRY; + goto out; + } + for (retries = 0; retries < 4; retries++) { + rc = DEVICE_ERROR; + pr_info("%s REPORT_LUNS: handle(0x%04x), retries(%d)\n", + ioc->name, handle, retries); + memset(lun_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = REPORT_LUNS; + transfer_packet->cdb[6] = (data_length >> 24) & 0xFF; + transfer_packet->cdb[7] = (data_length >> 16) & 0xFF; + transfer_packet->cdb[8] = (data_length >> 8) & 0xFF; + transfer_packet->cdb[9] = data_length & 0xFF; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + return_code = + leapioraid_scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, + tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition(ioc, + transfer_packet); + if (rc == DEVICE_READY) { + memcpy(data, lun_data, data_length); + goto out; + } else if (rc == DEVICE_ERROR) + goto out; + break; + case -EAGAIN: + rc = DEVICE_RETRY; + 
break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + } +out: + if (lun_data) + dma_free_coherent(&ioc->pdev->dev, data_length, lun_data, + transfer_packet->data_dma); + kfree(transfer_packet); + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_start_unit( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = START_STOP; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = 1; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + pr_info("%s START_UNIT: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_test_unit_ready( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; 
+ int sata_init_failure = 0; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = TEST_UNIT_READY; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; +sata_init_retry: + pr_info("%s TEST_UNIT_READY: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_RETRY && + transfer_packet->log_info == 0x31111000) { + if (!sata_init_failure++) { + pr_err( + "%s SATA Initialization Timeout,sending a retry\n", + ioc->name); + rc = DEVICE_READY; + goto sata_init_retry; + } else { + pr_err( + "%s SATA Initialization Failed\n", + ioc->name); + rc = DEVICE_ERROR; + } + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_ata_pass_thru_idd( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *is_ssd_device, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u16 *idd_data; + int return_code; + u32 data_length; + + idd_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + data_length = 512; + idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!idd_data) { + rc = DEVICE_RETRY; + 
goto out; + } + rc = DEVICE_READY; + memset(idd_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = ATA_12; + transfer_packet->cdb[1] = 0x8; + transfer_packet->cdb[2] = 0xd; + transfer_packet->cdb[3] = 0x1; + transfer_packet->cdb[9] = 0xec; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) { + if (le16_to_cpu(idd_data[217]) == 1) + *is_ssd_device = 1; + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (idd_data) { + dma_free_coherent(&ioc->pdev->dev, data_length, idd_data, + transfer_packet->data_dma); + } + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_wait_for_device_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + int lun, u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + + if (ioc->pci_error_recovery) + return DEVICE_ERROR; + if (ioc->shost_recovery) + return DEVICE_RETRY; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_READY || rc == DEVICE_ERROR) + return rc; + else if (rc == DEVICE_START_UNIT) { + rc = leapioraid_scsihost_start_unit( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) + return rc; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, + tr_timeout, tr_method); + } + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + 
+static enum device_responsive_state +leapioraid_scsihost_wait_for_target_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + struct scsi_lun *lun_data; + u32 length, num_luns; + u8 *data; + int lun; + struct scsi_lun *lunp; + + lun_data = + kcalloc(255, sizeof(struct scsi_lun), GFP_KERNEL); + if (!lun_data) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return DEVICE_RETRY; + } + rc = leapioraid_scsihost_report_luns(ioc, handle, lun_data, + 255 * sizeof(struct scsi_lun), + retry_count, is_pd, tr_timeout, tr_method); + if (rc != DEVICE_READY) + goto out; + data = (u8 *) lun_data; + length = ((data[0] << 24) | (data[1] << 16) | + (data[2] << 8) | (data[3] << 0)); + /* REPORT LUNS returns the full list length even if it exceeds our 255-entry buffer (1 header slot + 254 LUNs); clamp to avoid reading past lun_data[] below */ + num_luns = min_t(u32, length / sizeof(struct scsi_lun), 254); + lunp = &lun_data[1]; + lun = (num_luns) ? scsilun_to_int(&lun_data[1]) : 0; + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, handle, retry_count, + is_pd, lun, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) { + struct scsi_lun *lunq; + + for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) { + rc = leapioraid_scsihost_wait_for_device_to_become_ready(ioc, + handle, + retry_count, + is_pd, + scsilun_to_int + (lunq), + tr_timeout, + tr_method); + if (rc != DEVICE_ERROR) + goto out; + } + } +out: + kfree(lun_data); + return rc; +} + +static u8 +leapioraid_scsihost_check_access_status( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: + rc = 0; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: + desc = "sata capability failed"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: + desc = "sata affiliation conflict"; + break; + case 
LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: + desc = "route not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: + desc = "smp error not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: + desc = "device blocked"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX: + desc = "sata initialization failed"; + break; + default: + desc = "unknown"; + break; + } + if (!rc) + return 0; + pr_err( + "%s discovery errors(%s): sas_address(0x%016llx),\n\t\t" + "handle(0x%04x)\n", + ioc->name, + desc, + (unsigned long long)sas_address, + handle); + return rc; +} + +static void +leapioraid_scsihost_check_device(struct LEAPIORAID_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, + u8 link_rate) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device = NULL; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + u32 device_info; + u8 *serial_number = NULL; + u8 *original_serial_number = NULL; + int rc; + struct leapioraid_hba_port *port; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + 
return; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + return; + if (phy_number != sas_device_pg0.PhyNum) + return; + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + port = leapioraid_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); + if (!port) + goto out_unlock; + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (!sas_device) + goto out_unlock; + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + sas_device->enclosure_handle); + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + } + if (!(le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err("%s device is not 
present handle(0x%04x), flags!!!\n", + ioc->name, handle); + goto out_unlock; + } + if (leapioraid_scsihost_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + original_serial_number = sas_device->serial_number; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + leapioraid_scsihost_ublock_io_device_wait(ioc, sas_address, port); + if (!original_serial_number) + goto out; + if (leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number) == + DEVICE_READY && serial_number) { + rc = strcmp(original_serial_number, serial_number); + kfree(serial_number); + if (!rc) + goto out; + leapioraid_device_remove_by_sas_address(ioc, sas_address, port); + leapioraid_transport_update_links(ioc, parent_sas_address, + handle, phy_number, link_rate, + port); + leapioraid_scsihost_add_device(ioc, handle, 0, 0); + } + goto out; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static int +leapioraid_scsihost_add_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u8 retry_count, + u8 is_pd) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + u64 sas_address; + u32 device_info; + enum device_responsive_state rc; + u8 connector_name[5], port_id; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if 
(!(leapioraid_scsihost_is_sas_end_device(device_info))) + return 0; + set_bit(handle, ioc->pend_os_device_add); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + if (!(le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err("%s device is not present handle(0x%04x)!!!\n", + ioc->name, handle); + return 0; + } + if (leapioraid_scsihost_check_access_status( + ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + return 0; + port_id = sas_device_pg0.PhysicalPort; + sas_device = leapioraid_get_sdev_by_addr(ioc, + sas_address, + leapioraid_get_port_by_id(ioc, port_id, 0)); + if (sas_device) { + clear_bit(handle, ioc->pend_os_device_add); + leapioraid_sas_device_put(sas_device); + return 0; + } + if (le16_to_cpu(sas_device_pg0.EnclosureHandle)) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + le16_to_cpu + (sas_device_pg0.EnclosureHandle)); + if (enclosure_dev == NULL) + pr_info( + "%s Enclosure handle(0x%04x)doesn't\n\t\t" + "match with enclosure device!\n", + ioc->name, + le16_to_cpu(sas_device_pg0.EnclosureHandle)); + } + if (!ioc->wait_for_discovery_to_complete) { + pr_info( + "%s detecting: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + ioc->name, handle, + (unsigned long long)sas_address, + sas_device_pg0.PhyNum); + rc = leapioraid_scsihost_wait_for_target_to_become_ready( + ioc, handle, + retry_count, + is_pd, 30, 0); + if (rc != DEVICE_READY) { + if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0) + dewtprintk(ioc, + pr_info("%s %s: device not ready: slot(%d)\n", + ioc->name, __func__, + le16_to_cpu(sas_device_pg0.Slot))); + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + memcpy(connector_name, + sas_device_pg0.ConnectorName, 4); + connector_name[4] = '\0'; + dewtprintk(ioc, + pr_info( + "%s %s: device not ready: enclosure level(0x%04x), connector name( %s)\n", + ioc->name, __func__, + sas_device_pg0.EnclosureLevel, + 
connector_name)); + } + if ((enclosure_dev) + && (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID)) + pr_err( + "%s chassis slot(0x%04x)\n", ioc->name, + enclosure_dev->pg0.ChassisSlot); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + return 1; + else if (rc == DEVICE_ERROR) + return 0; + } + } + sas_device = kzalloc(sizeof(struct leapioraid_sas_device), + GFP_KERNEL); + if (!sas_device) + return 0; + + kref_init(&sas_device->refcount); + sas_device->handle = handle; + if (leapioraid_scsihost_get_sas_address(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + &sas_device->sas_address_parent) != 0) + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + if (sas_device->enclosure_handle != 0) + sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); + sas_device->device_info = device_info; + sas_device->sas_address = sas_address; + sas_device->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_device->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->phy = sas_device_pg0.PhyNum; + sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
+ 1 : 0; + sas_device->supports_sata_smart = + (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED); + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + pr_err( + "%s handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + ioc->name, handle, sas_device->sas_address, + sas_device->port_type); + if (ioc->wait_for_discovery_to_complete) + leapioraid_scsihost_sas_device_init_add(ioc, sas_device); + else + leapioraid_scsihost_sas_device_add(ioc, sas_device); +out: + leapioraid_sas_device_put(sas_device); + return 0; +} + +static void +leapioraid_scsihost_remove_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + + if (sas_device->pfa_led_on) { + leapioraid_scsihost_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + dewtprintk(ioc, pr_info( + "%s %s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + if (sas_device->starget && 
sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + leapioraid_scsihost_ublock_io_device( + ioc, sas_device->sas_address, + sas_device->port); + sas_target_priv_data->handle = + LEAPIORAID_INVALID_DEVICE_HANDLE; + } + if (!ioc->hide_drives) + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + pr_info("%s removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + dewtprintk(ioc, pr_info( + "%s %s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long) + sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + kfree(sas_device->serial_number); +} + +static void +leapioraid_scsihost_sas_topology_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + pr_info("%s sas topology change: (%s)\n", + ioc->name, status_str); + pr_info( + "\thandle(0x%04x), enclosure_handle(0x%04x)\n\t\t" + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + 
event_data->StartPhyNum, + event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info( + "\tphy(%02d), attached_handle(0x%04x): %s:\n\t\t" + "link rate: new(0x%02x), old(0x%02x)\n", + phy_number, + handle, + status_str, + link_rate, + prev_link_rate); + } +} + +static int +leapioraid_scsihost_sas_topology_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + int i; + u16 parent_handle, handle; + u16 reason_code; + u8 phy_number, max_phys; + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + u8 link_rate, prev_link_rate; + int rc; + int requeue_event; + struct leapioraid_hba_port *port; + struct LeapioraidEventDataSasTopoChangeList_t *event_data = + fw_event->event_data; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_topology_change_event_debug( + ioc, event_data); + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; + if (!ioc->sas_hba.num_phys) + leapioraid_scsihost_sas_host_add(ioc); + 
else + leapioraid_scsihost_sas_host_refresh(ioc); + if (fw_event->ignore) { + dewtprintk(ioc, + pr_info("%s ignoring expander event\n", + ioc->name)); + return 0; + } + parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + port = leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0); + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED) + if (leapioraid_scsihost_expander_add(ioc, parent_handle) != 0) + return 0; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_handle( + ioc, parent_handle); + if (sas_expander) { + sas_address = sas_expander->sas_address; + max_phys = sas_expander->num_phys; + port = sas_expander->port; + } else if (parent_handle < ioc->sas_hba.num_phys) { + sas_address = ioc->sas_hba.sas_address; + max_phys = ioc->sas_hba.num_phys; + } else { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { + if (fw_event->ignore) { + dewtprintk(ioc, pr_info( + "%s ignoring expander event\n", + ioc->name)); + return 0; + } + if (ioc->remove_host || ioc->pci_error_recovery) + return 0; + phy_number = event_data->StartPhyNum + i; + if (phy_number >= max_phys) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if ((event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) + continue; + if (fw_event->delayed_work_active && (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { + dewtprintk(ioc, + pr_info( + "%s ignoring Targ not responding\n\t\t" + "event phy in re-queued event processing\n", + ioc->name)); + continue; + } + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = 
event_data->PHY[i].LinkRate & 0xF; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + if (ioc->shost_recovery) + break; + if (link_rate == prev_link_rate) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + leapioraid_scsihost_check_device(ioc, sas_address, handle, + phy_number, link_rate); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, + handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + break; + } + if (!test_bit(handle, ioc->pend_os_device_add)) + break; + dewtprintk(ioc, pr_err( + "%s handle(0x%04x) device not found:\n\t\t" + "convert event to a device add\n", + ioc->name, handle)); + event_data->PHY[i].PhyStatus &= 0xF0; + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED; + fallthrough; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + if (ioc->shost_recovery) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + rc = leapioraid_scsihost_add_device(ioc, handle, + fw_event->retries[i], 0); + if (rc) { + fw_event->retries[i]++; + requeue_event = 1; + } else { + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT; + } + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + leapioraid_scsihost_device_remove_by_handle(ioc, handle); + break; + } + } + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING + && sas_expander) + leapioraid_expander_remove(ioc, sas_address, port); + return requeue_event; +} + +static void +leapioraid_scsihost_sas_device_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + 
switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info("%s device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_info("%s , ASC(0x%x), ASCQ(0x%x)\n", + ioc->name, event_data->ASC, 
event_data->ASCQ); + pr_info("\n"); +} + +static void +leapioraid_scsihost_sas_device_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + struct LEAPIORAID_TARGET *target_priv_data; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + if (event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = __leapioraid_get_sdev_by_addr( + ioc, sas_address, + leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0)); + if (!sas_device || !sas_device->starget) + goto out; + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + goto out; + if (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + pr_err( + "%s %s tm_busy flag for handle(0x%04x)\n", ioc->name, + (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", + target_priv_data->handle); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info( + "%s enclosure status change: (%s)\n\thandle(0x%04x),\n\t\t" + "enclosure logical id(0x%016llx) number slots(%d)\n", + ioc->name, + reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data = + fw_event->event_data; + int rc; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + ioc, fw_event->event_data); + if (ioc->shost_recovery) + return; + event_data->EnclosureHandle = le16_to_cpu(event_data->EnclosureHandle); + if (event_data->EnclosureHandle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + event_data->EnclosureHandle); + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + if (!enclosure_dev) { + enclosure_dev = + kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + 
__FILE__, __LINE__, __func__); + return; + } + rc = leapioraid_config_get_enclosure_pg0(ioc, + &mpi_reply, + &enclosure_dev->pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + event_data->EnclosureHandle); + if (rc + || (le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + list_add_tail(&enclosure_dev->list, + &ioc->enclosure_list); + } + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + if (enclosure_dev) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } + break; + default: + break; + } +} + +static void +leapioraid_scsihost_sas_broadcast_primitive_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + u16 smid, handle; + u32 lun; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidEventDataSasBroadcastPrimitive_t *event_data = + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + struct leapioraid_scsiio_tracker *st; + + mutex_lock(&ioc->tm_cmds.mutex); + dewtprintk(ioc, + pr_info( + "%s %s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, + event_data->PhyNum, event_data->PortWidth)); + leapioraid_scsihost_block_io_all_device(ioc); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; +broadcast_aen_retry: + if (max_retries++ == 5) { + dewtprintk(ioc, pr_info("%s %s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, pr_info("%s %s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = 
leapioraid_base_scsi_cmd_priv(scmd); + if (!st || st->smid == 0) + continue; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + if (ioc->shost_recovery) + goto out; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = leapioraid_scsihost_issue_tm(ioc, handle, 0, 0, lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK, + st->smid, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "leapioraid_scsihost_issue_tm:\n\t\t" + "FAILED when sending QUERY_TASK: scmd(%p)\n", + scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; +tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, pr_err( + "%s %s: ABORT_TASK: giving up\n", + ioc->name, __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (ioc->shost_recovery) + goto out_no_lock; + r = leapioraid_scsihost_issue_tm(ioc, handle, sdev->channel, + sdev->id, sdev->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, 30, 0); + if (r == FAILED) { 
/* --- tail of the broadcast-AEN task-management handler (begins above) --- */
			sdev_printk(KERN_WARNING, sdev,
				    "ABORT_TASK: FAILED : scmd(%p)\n", scmd);
			goto tm_retry;
		}
		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
				    "leapioraid_scsihost_issue_tm:\n\t\t"
				    "ABORT_TASK: RETRIES (%d): scmd(%p)\n",
				    task_abort_retries - 1,
				    scmd);
		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		/* re-take the lookup lock before continuing the scan loop */
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}
	/* more AENs arrived while we were busy: rescan from the top */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   pr_info("%s %s: loop back due to pending AEN\n",
				   ioc->name, __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}
out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
out_no_lock:
	dewtprintk(ioc, pr_err(
		"%s %s - exit, query_count = %d termination_count = %d\n",
		ioc->name, __func__, query_count,
		termination_count));
	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		leapioraid_scsihost_ublock_io_all_device(ioc, 1);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/*
 * leapioraid_scsihost_sas_discovery_event - handle a SAS discovery event
 * @ioc: per-adapter object
 * @fw_event: firmware event work item carrying the event payload
 *
 * Logs start/stop (and any DiscoveryStatus) when event logging is enabled.
 * On the first discovery-start with no host phys yet, adds the SAS host;
 * if discovery was disabled via module param, waits out host recovery first.
 */
static void
leapioraid_scsihost_sas_discovery_event(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_fw_event_work *fw_event)
{
	struct LeapioraidEventDataSasDiscovery_t *event_data
		= fw_event->event_data;

	if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) {
		pr_info("%s sas discovery event: (%s)",
			ioc->name,
			(event_data->ReasonCode ==
			 LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ?
			"start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_info("discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_info("\n");
	}
	if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys) {
		if (disable_discovery > 0 && ioc->shost_recovery) {
			/* busy-wait (1s steps) until controller recovery ends */
			while (ioc->shost_recovery)
				ssleep(1);
		}
		leapioraid_scsihost_sas_host_add(ioc);
	}
}

/*
 * leapioraid_scsihost_sas_device_discovery_error_event - log SMP discovery
 * failures reported by firmware
 * @ioc: per-adapter object
 * @fw_event: firmware event work item carrying the event payload
 *
 * Pure logging: distinguishes SMP command failure from SMP timeout.
 */
static void
leapioraid_scsihost_sas_device_discovery_error_event(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_fw_event_work *fw_event)
{
	struct LeapioraidEventDataSasDeviceDiscoveryError_t *event_data =
		fw_event->event_data;

	switch (event_data->ReasonCode) {
	case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED:
		pr_warn(
			"%s SMP command sent to the expander(handle:0x%04x,\n\t\t"
			"sas_address:0x%016llx,physical_port:0x%02x) has failed\n",
			ioc->name,
			le16_to_cpu(event_data->DevHandle),
			(unsigned long long)le64_to_cpu(event_data->SASAddress),
			event_data->PhysicalPort);
		break;
	case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		pr_warn(
			"%s SMP command sent to the expander(handle:0x%04x,\n\t\t"
			"sas_address:0x%016llx,physical_port:0x%02x) has timed out\n",
			ioc->name,
			le16_to_cpu(event_data->DevHandle),
			(unsigned long long)le64_to_cpu(event_data->SASAddress),
			event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/*
 * leapioraid_scsihost_ir_fastpath - enable IR fast path for a physical disk
 * @ioc: per-adapter object
 * @handle: device handle (used for logging only)
 * @phys_disk_num: physical disk number the RAID action targets
 *
 * Sends a RAID_ACTION request (Action 0x24 — presumably "enable fast path";
 * TODO confirm against the firmware interface spec) through the single-slot
 * scsih_cmds channel and waits up to 10s for completion.
 *
 * Return: 0 on success, -EAGAIN if the channel is busy or no smid is
 * available, -EFAULT on timeout or firmware error status.
 *
 * NOTE(review): @issue_reset is initialized to 0 and passed by value to
 * leapioraid_check_cmd_timeout(); unless that is a macro assigning to the
 * caller's variable (as in mpt3sas), the hard-reset branch at 'out' is
 * unreachable — confirm against leapioraid_func.h.
 */
static int
leapioraid_scsihost_ir_fastpath(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle,
	u8 phys_disk_num)
{
	struct LeapioraidRaidActionReq_t *mpi_request;
	struct LeapioraidRaidActionRep_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
		pr_err("%s %s: scsih_cmd in use\n",
		       ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
	smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION;
	mpi_request->Action = 0x24;
	mpi_request->PhysDiskNum = phys_disk_num;
	dewtprintk(ioc, pr_info(
		"%s IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
		ioc->name, handle, phys_disk_num));
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ);
	if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
		/* request frame size is in 32-bit words, hence the / 4 */
		leapioraid_check_cmd_timeout(ioc,
					     ioc->scsih_cmds.status,
					     mpi_request,
					     sizeof(struct LeapioraidRaidActionReq_t)
					     / 4, issue_reset);
		rc = -EFAULT;
		goto out;
	}
	if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc, pr_err(
				"%s IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
				ioc->name, ioc_status,
				log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc, pr_err(
				"%s IR RAID_ACTION: completed successfully\n",
				ioc->name));
	}
out:
	ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
	if (issue_reset)
		leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/*
 * leapioraid_scsihost_reprobe_lun - toggle upper-layer-driver attachment
 * @sdev: scsi device to reprobe
 * @no_uld_attach: non-NULL to hide the device from ULDs, NULL to expose it
 *
 * Used via starget_for_each_device() when a disk becomes / stops being a
 * RAID component.  (Continues on the following lines.)
 */
static void
leapioraid_scsihost_reprobe_lun(
	struct scsi_device *sdev, void *no_uld_attach)
{
	int rc;

	sdev->no_uld_attach = no_uld_attach ?
1 : 0; + sdev_printk(KERN_INFO, sdev, "%s raid component\n", + sdev->no_uld_attach ? "hiding" : "exposing"); + rc = scsi_device_reprobe(sdev); + pr_info("%s rc=%d\n", __func__, rc); +} + +static void +leapioraid_scsihost_sas_volume_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + leapioraid_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_wwid( + ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + return; + raid_device = kzalloc(sizeof(struct leapioraid_raid_device), + GFP_KERNEL); + if (!raid_device) + return; + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + leapioraid_scsihost_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + leapioraid_scsihost_determine_boot_device( + ioc, raid_device, RAID_CHANNEL); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_sas_volume_delete( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = 
raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long)raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +static void +leapioraid_scsihost_sas_pd_expose( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device->volume_handle = 0; + sas_device->volume_wwid = 0; + clear_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags &= + ~LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->pfa_led_on = 0; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + if (starget) + starget_for_each_device(starget, NULL, leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_hide( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + u16 volume_handle = 0; + u64 volume_wwid = 0; + + leapioraid_config_get_volume_handle(ioc, handle, &volume_handle); + if (volume_handle) + leapioraid_config_get_volume_wwid(ioc, 
volume_handle, + &volume_wwid); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + set_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + if (starget) + starget_for_each_device(starget, (void *)1, + leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_delete(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + leapioraid_scsihost_device_remove_by_handle(ioc, handle); +} + +static void +leapioraid_scsihost_sas_pd_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_sas_device_put(sas_device); + return; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != 
LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, &sas_address)) + leapioraid_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, + 0)); + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_scsihost_add_device(ioc, handle, 0, 1); +} + +static void +leapioraid_scsihost_sas_ir_config_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrCfgChangeList_t *event_data) +{ + struct LeapioraidEventIrCfgEle_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + pr_info("%s raid config change: (%s), elements(%d)\n", + ioc->name, + (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
"foreign" : + "native", event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info( + "\t(%s:%s), vol handle(0x%04x), pd handle(0x%04x), pd num(0x%02x)\n", + element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +static void +leapioraid_scsihost_sas_ir_config_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventIrCfgEle_t *element; + int i; + u8 foreign_config; + struct LeapioraidEventDataIrCfgChangeList_t *event_data + = fw_event->event_data; + + if ((ioc->logging_level & 
LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_config_change_event_debug(ioc, event_data); + foreign_config = (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + if (ioc->shost_recovery) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == + LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE) + leapioraid_scsihost_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_delete(ioc, + le16_to_cpu + (element->VolDevHandle)); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + leapioraid_scsihost_sas_pd_hide(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + leapioraid_scsihost_sas_pd_expose(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + leapioraid_scsihost_sas_pd_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + leapioraid_scsihost_sas_pd_delete(ioc, element); + break; + } + } +} + +static void +leapioraid_scsihost_sas_ir_volume_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct leapioraid_raid_device *raid_device; + u16 handle; + u32 state; + int rc; + struct LeapioraidEventDataIrVol_t *event_data + = fw_event->event_data; + + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + 
	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->warpdrive_msg)
		dewtprintk(ioc,
			   pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				   ioc->name,
				   __func__, handle,
				   le32_to_cpu(event_data->PreviousValue),
				   state));
	switch (state) {
	case LEAPIORAID_RAID_VOL_STATE_MISSING:
	case LEAPIORAID_RAID_VOL_STATE_FAILED:
		/* dead volume: tear it down */
		leapioraid_scsihost_sas_volume_delete(ioc, handle);
		break;
	case LEAPIORAID_RAID_VOL_STATE_ONLINE:
	case LEAPIORAID_RAID_VOL_STATE_DEGRADED:
	case LEAPIORAID_RAID_VOL_STATE_OPTIMAL:
		/* usable volume: register it unless already tracked */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device =
		    leapioraid_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			break;
		leapioraid_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			pr_err(
				"%s failure at %s:%d/%s()!\n", ioc->name,
				__FILE__, __LINE__, __func__);
			break;
		}
		raid_device = kzalloc(sizeof(struct leapioraid_raid_device),
				      GFP_KERNEL);
		if (!raid_device)
			break;

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		leapioraid_scsihost_raid_device_add(ioc, raid_device);
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
				     raid_device->id, 0);
		if (rc)
			leapioraid_scsihost_raid_device_remove(ioc,
							       raid_device);
		break;
	case LEAPIORAID_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}

/*
 * leapioraid_scsihost_sas_ir_physical_disk_event - handle a RAID physical
 * disk state change
 * @ioc: per-adapter object
 * @fw_event: firmware event work item carrying the phys-disk event
 *
 * On transition to an active state (online/degraded/rebuilding/optimal/
 * hot-spare) the handle is marked as a pd; if the device is not yet known,
 * SAS device page 0 is read, parent links refreshed, and the device added
 * hidden.  Inactive states are ignored.
 */
static void
leapioraid_scsihost_sas_ir_physical_disk_event(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct leapioraid_sas_device *sas_device;
	struct LeapioraidCfgRep_t mpi_reply;
	struct LeapioraidSasDevP0_t sas_device_pg0;
	u32 ioc_status;
	struct LeapioraidEventDataIrPhyDisk_t *event_data
		= fw_event->event_data;
	u64 sas_address;

	if (ioc->shost_recovery)
		return;
	if (event_data->ReasonCode !=
	    LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;
	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->warpdrive_msg)
		dewtprintk(ioc,
			   pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				   ioc->name,
				   __func__, handle,
				   le32_to_cpu(event_data->PreviousValue),
				   state));
	switch (state) {
	case LEAPIORAID_RAID_PD_STATE_ONLINE:
	case LEAPIORAID_RAID_PD_STATE_DEGRADED:
	case LEAPIORAID_RAID_PD_STATE_REBUILDING:
	case LEAPIORAID_RAID_PD_STATE_OPTIMAL:
	case LEAPIORAID_RAID_PD_STATE_HOT_SPARE:
		set_bit(handle, ioc->pd_handles);
		sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already known: nothing more to do */
			leapioraid_sas_device_put(sas_device);
			return;
		}
		if ((leapioraid_config_get_sas_device_pg0(
			ioc, &mpi_reply,
			&sas_device_pg0,
			LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE,
			handle))) {
			pr_err("%s failure at %s:%d/%s()!\n",
			       ioc->name, __FILE__, __LINE__, __func__);
			return;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err("%s failure at %s:%d/%s()!\n",
			       ioc->name, __FILE__, __LINE__, __func__);
			return;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!leapioraid_scsihost_get_sas_address
		    (ioc, parent_handle, &sas_address))
			leapioraid_transport_update_links(ioc, sas_address,
				handle,
				sas_device_pg0.PhyNum,
				LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
				leapioraid_get_port_by_id
				(ioc,
				 sas_device_pg0.PhysicalPort, 0));
		leapioraid_scsihost_add_device(ioc, handle, 0, 1);
		break;
	case LEAPIORAID_RAID_PD_STATE_OFFLINE:
	case LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED:
	case LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}

/*
 * leapioraid_scsihost_sas_ir_operation_status_event_debug - log an IR
 * operation status event in human-readable form
 * @ioc: per-adapter object
 * @event_data: IR operation status event payload
 *
 * (Continues on the following lines.)
 */
static void
leapioraid_scsihost_sas_ir_operation_status_event_debug(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataIrOpStatus_t *event_data)
{
	char *reason_str = NULL;

	switch
(event_data->RAIDOperation) { + case LEAPIORAID_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + if (!reason_str) + return; + pr_info( + "%s raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +static void +leapioraid_scsihost_sas_ir_operation_status_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataIrOpStatus_t *event_data + = fw_event->event_data; + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + u16 handle; + + if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_operation_status_event_debug( + ioc, event_data); + if (event_data->RAIDOperation == LEAPIORAID_EVENT_IR_RAIDOP_RESYNC) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + handle = le16_to_cpu(event_data->VolDevHandle); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->percent_complete = + event_data->PercentComplete; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_prep_device_scan(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) + sas_device_priv_data->sas_target->deleted = 1; + } +} + +static 
void +leapioraid_scsihost_update_device_qdepth(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + pr_info("%s Update Devices with FW Reported QD\n", + ioc->name); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_device && + sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + (sas_device->port_type > + 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device + && sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + } +} + +static void +leapioraid_scsihost_mark_responding_sas_device( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasDevP0_t *sas_device_pg0) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + unsigned long flags; + struct leapioraid_hba_port *port; + + port = leapioraid_get_port_by_id(ioc, sas_device_pg0->PhysicalPort, 0); + if (sas_device_pg0->EnclosureHandle) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + le16_to_cpu + (sas_device_pg0->EnclosureHandle)); + if (enclosure_dev == NULL) + pr_info( + "%s Enclosure handle(0x%04x)doesn't match with enclosure device!\n", + ioc->name, sas_device_pg0->EnclosureHandle); + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if ((sas_device->sas_address == + le64_to_cpu(sas_device_pg0->SASAddress)) + && (sas_device->slot == le16_to_cpu(sas_device_pg0->Slot)) + && (sas_device->port == port)) { + sas_device->responding = 
			    1;
			starget = sas_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* device answered: clear reset-era flags */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
					"handle(0x%04x), sas_address(0x%016llx), port: %d\n",
					sas_device->handle,
					(unsigned long long)sas_device->sas_address,
					sas_device->port->port_id);
				if (sas_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
						"enclosure logical id(0x%016llx), slot(%d)\n",
						(unsigned long long)
						sas_device->enclosure_logical_id,
						sas_device->slot);
			}
			/* refresh enclosure-level / connector info from pg0 */
			if (le16_to_cpu(sas_device_pg0->Flags) &
			    LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
				sas_device->enclosure_level =
				    sas_device_pg0->EnclosureLevel;
				memcpy(sas_device->connector_name,
				       sas_device_pg0->ConnectorName, 4);
				sas_device->connector_name[4] = '\0';
			} else {
				sas_device->enclosure_level = 0;
				sas_device->connector_name[0] = '\0';
			}
			sas_device->enclosure_handle =
			    le16_to_cpu(sas_device_pg0->EnclosureHandle);
			sas_device->is_chassis_slot_valid = 0;
			if (enclosure_dev) {
				sas_device->enclosure_logical_id =
				    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
				    LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
					sas_device->is_chassis_slot_valid = 1;
					sas_device->chassis_slot =
					    enclosure_dev->pg0.ChassisSlot;
				}
			}
			/* firmware may hand out a new handle after reset */
			if (sas_device->handle ==
			    le16_to_cpu(sas_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
				sas_device->handle);
			sas_device->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(sas_device_pg0->DevHandle);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/*
 * leapioraid_scsihost_create_enclosure_list_after_reset - rebuild the cached
 * enclosure page-0 list
 * @ioc: per-adapter object
 *
 * Frees the stale list, then iterates GET_NEXT_HANDLE (starting from the
 * 0xFFFF wildcard) until the firmware reports no more enclosures, caching
 * each page 0 on ioc->enclosure_list.
 */
static void
leapioraid_scsihost_create_enclosure_list_after_reset(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_enclosure_node *enclosure_dev;
	struct LeapioraidCfgRep_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	leapioraid_free_enclosure_list(ioc);
	enclosure_handle = 0xFFFF;
	do {
		enclosure_dev =
		    kzalloc(sizeof(struct leapioraid_enclosure_node),
			    GFP_KERNEL);
		if (!enclosure_dev) {
			pr_err("%s failure at %s:%d/%s()!\n", ioc->name,
			       __FILE__, __LINE__, __func__);
			return;
		}
		rc = leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply,
			&enclosure_dev->pg0,
			LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
			enclosure_handle);
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
			   LEAPIORAID_IOCSTATUS_MASK)) {
			/* end of list (or error): stop iterating */
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list, &ioc->enclosure_list);
		enclosure_handle =
		    le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}

/*
 * leapioraid_scsihost_search_responding_sas_devices - post-reset scan for
 * SAS end devices that are still present
 * @ioc: per-adapter object
 *
 * Walks SAS device page 0 via GET_NEXT_HANDLE and marks every end device
 * found as responding.
 */
static void
leapioraid_scsihost_search_responding_sas_devices(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidSasDevP0_t sas_device_pg0;
	struct LeapioraidCfgRep_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	pr_info("%s search for end-devices: start\n",
		ioc->name);
	if (list_empty(&ioc->sas_device_list))
		goto out;
	handle = 0xFFFF;
	while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
			&sas_device_pg0,
			LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
			handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_info(
				"%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, __func__, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		if (!(leapioraid_scsihost_is_sas_end_device(device_info)))
			continue;
		leapioraid_scsihost_mark_responding_sas_device(
			ioc, &sas_device_pg0);
	}
out:
	pr_info("%s search for end-devices: complete\n",
		ioc->name);
}

static void
/*
 * leapioraid_scsihost_mark_responding_raid_device - flag a RAID volume as
 * still present after reset
 * @ioc: per-adapter object
 * @wwid: volume WWID to match
 * @handle: volume device handle reported by the post-reset scan
 *
 * Matches by WWID, clears the target's deleted flag, and refreshes the
 * handle if firmware reassigned it.  The lock is dropped around
 * starget_printk (may sleep/log) and re-taken for the handle update.
 */
leapioraid_scsihost_mark_responding_raid_device(
	struct LEAPIORAID_ADAPTER *ioc, u64 wwid, u16 handle)
{
	struct LEAPIORAID_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct leapioraid_raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
				       "handle(0x%04x), wwid(0x%016llx)\n",
				       handle,
				       (unsigned long long)raid_device->wwid);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
						       flags);
				return;
			}
			/* firmware reassigned the handle after reset */
			pr_info("\thandle changed from(0x%04x)!!!\n",
				raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/*
 * leapioraid_scsihost_search_responding_raid_devices - post-reset scan for
 * RAID volumes and physical disks
 * @ioc: per-adapter object
 *
 * Pass 1: walks volume page 1 via GET_NEXT_HANDLE and marks volumes in a
 * usable state (optimal/online/degraded) as responding.  Pass 2: rebuilds
 * the pd_handles bitmap from phys disk page 0.  No-op without IR firmware.
 */
static void
leapioraid_scsihost_search_responding_raid_devices(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidRaidVolP1_t volume_pg1;
	struct LeapioraidRaidVolP0_t volume_pg0;
	struct LeapioraidRaidPDP0_t pd_pg0;
	struct LeapioraidCfgRep_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	if (!ioc->ir_firmware)
		return;
	pr_info("%s search for raid volumes: start\n",
		ioc->name);
	if (list_empty(&ioc->raid_device_list))
		goto out;
	handle = 0xFFFF;
	while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
			&volume_pg1,
			LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE,
			handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, __func__, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply,
				&volume_pg0,
				LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
				handle,
				sizeof(struct LeapioraidRaidVolP0_t)))
			continue;
		if (volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_DEGRADED)
			leapioraid_scsihost_mark_responding_raid_device(ioc,
				le64_to_cpu(volume_pg1.WWID), handle);
	}
	/* second pass: rebuild the physical-disk handle bitmap */
	phys_disk_num = 0xFF;
	memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
	while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply,
			&pd_pg0,
			LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
			phys_disk_num))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, __func__, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		set_bit(handle, ioc->pd_handles);
	}
out:
	pr_info("%s search for responding raid volumes: complete\n",
		ioc->name);
}

/*
 * leapioraid_scsihost_mark_responding_expander - flag an expander as still
 * present after reset
 * @ioc: per-adapter object
 * @expander_pg0: expander page 0 read during the post-reset scan
 *
 * (Continues on the following lines.)
 */
static void
leapioraid_scsihost_mark_responding_expander(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidExpanderP0_t *expander_pg0)
{
	struct leapioraid_raid_sas_node *sas_expander;
	unsigned long flags;
	int i;
	u8 port_id = expander_pg0->PhysicalPort;
	struct leapioraid_hba_port *port = leapioraid_get_port_by_id(
		ioc, port_id, 0);
	struct leapioraid_enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16
enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); + u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); + + if (enclosure_handle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + enclosure_handle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + (sas_expander->port != port)) + continue; + sas_expander->responding = 1; + if (enclosure_dev) { + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + sas_expander->enclosure_handle = + le16_to_cpu(expander_pg0->EnclosureHandle); + } + if (sas_expander->handle == handle) + goto out; + pr_info( + "\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0; i < sas_expander->num_phys; i++) + sas_expander->phy[i].handle = handle; + goto out; + } +out: + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static void +leapioraid_scsihost_search_responding_expanders( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + u8 port; + + pr_info("%s search for expanders: start\n", + ioc->name); + if (list_empty(&ioc->sas_expander_list)) + goto out; + handle = 0xFFFF; + while (! 
	       (leapioraid_config_get_expander_pg0
		(ioc, &mpi_reply, &expander_pg0,
		 LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_info(
				"%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, __func__, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		pr_info(
			"\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
			handle,
			(unsigned long long)sas_address,
			((ioc->multipath_on_hba) ?
			 (port) : (LEAPIORAID_MULTIPATH_DISABLED_PORT_ID)));
		leapioraid_scsihost_mark_responding_expander(
			ioc, &expander_pg0);
	}
out:
	pr_info("%s search for expanders: complete\n",
		ioc->name);
}

/*
 * Remove every SAS end device, RAID volume and expander that was not
 * marked responding by the preceding post-reset searches, then unblock
 * I/O on everything that remains.
 */
static void
leapioraid_scsihost_remove_unresponding_devices(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_sas_device *sas_device, *sas_device_next;
	struct leapioraid_raid_sas_node *sas_expander, *sas_expander_next;
	struct leapioraid_raid_device *raid_device, *raid_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	pr_info("%s removing unresponding devices: start\n",
		ioc->name);
	pr_err("%s removing unresponding devices: sas end-devices\n",
		ioc->name);
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* drop references still parked on the init list */
	list_for_each_entry_safe(sas_device, sas_device_next,
				 &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		leapioraid_sas_device_put(sas_device);
	}
	/* collect non-responding devices on a private list for removal */
	list_for_each_entry_safe(sas_device, sas_device_next,
				 &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	/* actual removal is done outside the spinlock */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		leapioraid_scsihost_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		leapioraid_sas_device_put(sas_device);
	}
	if (ioc->ir_firmware) {
		pr_info("%s removing unresponding devices: volumes\n",
			ioc->name);
		list_for_each_entry_safe(raid_device, raid_device_next,
					 &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				leapioraid_scsihost_sas_volume_delete(ioc,
					raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}
	pr_err("%s removing unresponding devices: expanders\n",
		ioc->name);
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
				 &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(
		sas_expander, sas_expander_next, &tmp_list, list) {
		leapioraid_scsihost_expander_node_remove(ioc, sas_expander);
	}
	pr_err("%s removing unresponding devices: complete\n", ioc->name);
	leapioraid_scsihost_ublock_io_all_device(ioc, 0);
}

/*
 * Refresh the link state of every phy on @sas_expander from expander
 * config page 1 and push it into the SAS transport layer.
 */
static void
leapioraid_scsihost_refresh_expander_links(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_raid_sas_node *sas_expander, u16 handle)
{
	struct LeapioraidExpanderP1_t expander_pg1;
	struct LeapioraidCfgRep_t mpi_reply;
	int i;

	for (i = 0; i < sas_expander->num_phys; i++) {
		if ((leapioraid_config_get_expander_pg1(ioc, &mpi_reply,
				&expander_pg1, i,
				handle))) {
			pr_err("%s failure at %s:%d/%s()!\n",
			       ioc->name, __FILE__, __LINE__, __func__);
			return;
		}
		/* upper nibble of NegotiatedLinkRate carries the link rate */
		leapioraid_transport_update_links(ioc,
			sas_expander->sas_address,
			le16_to_cpu(expander_pg1.AttachedDevHandle),
			i,
			expander_pg1.NegotiatedLinkRate >> 4,
			sas_expander->port);
	}
}

/*
 * Full device re-scan after a controller reset: walk the firmware pages
 * for expanders, phys disks, RAID volumes and SAS end devices, adding
 * anything the driver does not already track.
 */
static void
leapioraid_scsihost_scan_for_devices_after_reset(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidExpanderP0_t
	    expander_pg0;
	struct LeapioraidSasDevP0_t sas_device_pg0;
	struct LeapioraidRaidVolP1_t *volume_pg1;
	struct LeapioraidRaidVolP0_t *volume_pg0;
	struct LeapioraidRaidPDP0_t pd_pg0;
	struct LeapioraidEventIrCfgEle_t element;
	struct LeapioraidCfgRep_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct leapioraid_sas_device *sas_device;
	struct leapioraid_raid_sas_node *expander_device;
	/* NOTE(review): 'static' local - shared across calls; verify intent */
	static struct leapioraid_raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	/* volume pages are heap-allocated; freed at function end */
	volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
	if (!volume_pg0)
		return;

	volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
	if (!volume_pg1) {
		kfree(volume_pg0);
		return;
	}
	pr_info("%s scan devices: start\n", ioc->name);
	leapioraid_scsihost_sas_host_refresh(ioc);
	/* phase 1: expanders */
	pr_info("%s \tscan devices: expanders start\n",
		ioc->name);
	handle = 0xFFFF;
	while (!
	       (leapioraid_config_get_expander_pg0
		(ioc, &mpi_reply, &expander_pg0,
		 LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		port_id = expander_pg0.PhysicalPort;
		expander_device =
		    leapioraid_scsihost_expander_find_by_sas_address(
			ioc,
			le64_to_cpu(expander_pg0.SASAddress),
			leapioraid_get_port_by_id(ioc,
						  port_id,
						  0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		if (expander_device)
			/* already known: just refresh its phy links */
			leapioraid_scsihost_refresh_expander_links(
				ioc, expander_device, handle);
		else {
			pr_err(
				"%s \tBEFORE adding expander:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(expander_pg0.SASAddress));
			leapioraid_scsihost_expander_add(ioc, handle);
			pr_info(
				"%s \tAFTER adding expander:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(expander_pg0.SASAddress));
		}
	}
	pr_info("%s \tscan devices: expanders complete\n",
		ioc->name);
	if (!ioc->ir_firmware)
		goto skip_to_sas;
	/* phase 2: RAID phys disks (IR firmware only) */
	pr_info("%s \tscan devices: phys disk start\n",
		ioc->name);
	phys_disk_num = 0xFF;
	while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply,
			&pd_pg0,
			LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
			phys_disk_num))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from phys disk scan:\n\t\t"
				"ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name,
				ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already tracked: drop the lookup reference */
			leapioraid_sas_device_put(sas_device);
			continue;
		}
		if (leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
				&sas_device_pg0,
				LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE,
				handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle,
							 &sas_address)) {
			pr_err(
				"%s \tBEFORE adding phys disk:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			leapioraid_transport_update_links(ioc, sas_address,
				handle,
				sas_device_pg0.PhyNum,
				LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
				leapioraid_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* retry the add (1s apart) until it succeeds */
			while (leapioraid_scsihost_add_device
			       (ioc, handle, retry_count++, 1)) {
				ssleep(1);
			}
			pr_err(
				"%s \tAFTER adding phys disk:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	pr_info("%s \tscan devices: phys disk complete\n",
		ioc->name);
	/* phase 3: RAID volumes */
	pr_info("%s \tscan devices: volumes start\n",
		ioc->name);
	handle = 0xFFFF;
	while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
			volume_pg1,
			LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE,
			handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1->DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = leapioraid_scsihost_raid_device_find_by_wwid(
			ioc, le64_to_cpu(volume_pg1->WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply,
				volume_pg0,
				LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
				handle,
				sizeof(struct LeapioraidRaidVolP0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		if (volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE ||
		    volume_pg0->VolumeState ==
		    LEAPIORAID_RAID_VOL_STATE_DEGRADED) {
			/* add via a synthesized IR config-change element */
			memset(&element, 0,
			       sizeof(struct LeapioraidEventIrCfgEle_t));
			element.ReasonCode = LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1->DevHandle;
			pr_info("%s \tBEFORE adding volume: handle (0x%04x)\n",
				ioc->name, volume_pg1->DevHandle);
			leapioraid_scsihost_sas_volume_add(ioc, &element);
			pr_info("%s \tAFTER adding volume: handle (0x%04x)\n",
				ioc->name, volume_pg1->DevHandle);
		}
	}
	pr_info("%s \tscan devices: volumes complete\n",
		ioc->name);
skip_to_sas:
	/* phase 4: plain SAS end devices */
	pr_info("%s \tscan devices: sas end devices start\n",
		ioc->name);
	handle = 0xFFFF;
	while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
			&sas_device_pg0,
			LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
			handle))) {
		ioc_status =
		    le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
			pr_err(
				"%s \tbreak from sas end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, ioc_status,
				le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!
		    (leapioraid_scsihost_is_sas_end_device
		     (le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = leapioraid_get_sdev_by_addr(ioc,
				le64_to_cpu(sas_device_pg0.SASAddress),
				leapioraid_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			leapioraid_sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!leapioraid_scsihost_get_sas_address
		    (ioc, parent_handle, &sas_address)) {
			pr_err(
				"%s \tBEFORE adding sas end device:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(sas_device_pg0.SASAddress));
			leapioraid_transport_update_links(ioc, sas_address,
				handle,
				sas_device_pg0.PhyNum,
				LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
				leapioraid_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			while (leapioraid_scsihost_add_device
			       (ioc, handle, retry_count++, 0)) {
				ssleep(1);
			}
			pr_err(
				"%s \tAFTER adding sas end device:\n\t\t"
				"handle (0x%04x), sas_addr(0x%016llx)\n",
				ioc->name, handle, (unsigned long long)
				le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	pr_err("%s \tscan devices: sas end devices complete\n", ioc->name);
	kfree(volume_pg0);
	kfree(volume_pg1);
	pr_info("%s scan devices: complete\n", ioc->name);
}

/*
 * Abort/complete all internally queued SCSI and task-management
 * commands after a reset, clear per-device bookkeeping bitmaps, and
 * flush the firmware event queue plus any running commands.
 */
void
leapioraid_scsihost_clear_outstanding_scsi_tm_commands(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next;
	unsigned long flags;

	if (ioc->scsih_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->scsih_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	if (ioc->tm_cmds.status & LEAPIORAID_CMD_PENDING) {
		ioc->tm_cmds.status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}
	spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags);
	/* field name 'intenal' [sic] is spelled that way in the driver */
	list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next,
				 &ioc->scsih_q_intenal_cmds, list) {
		scsih_qcmd->status |= LEAPIORAID_CMD_RESET;
		leapioraid_base_free_smid(ioc, scsih_qcmd->smid);
	}
	spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags);
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	memset(ioc->tm_tr_retry, 0, ioc->tm_tr_retry_sz);
	leapioraid_scsihost_fw_event_cleanup_queue(ioc);
	leapioraid_scsihost_flush_running_cmds(ioc);
}

/*
 * IOC reset notification hook, called once per reset phase; after the
 * reset is done it kicks off the post-reset device re-discovery
 * sequence (unless discovery is disabled and no phys exist).
 */
void
leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc,
				  int reset_phase)
{
	switch (reset_phase) {
	case LEAPIORAID_IOC_PRE_RESET_PHASE:
		dtmprintk(ioc, pr_info(
			"%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n",
			ioc->name, __func__));
		break;
	case LEAPIORAID_IOC_AFTER_RESET_PHASE:
		dtmprintk(ioc, pr_info(
			"%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n",
			ioc->name, __func__));
		leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc);
		break;
	case LEAPIORAID_IOC_DONE_RESET_PHASE:
		dtmprintk(ioc, pr_info(
			"%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n",
			ioc->name, __func__));
		if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
			if (ioc->multipath_on_hba) {
				leapioraid_scsihost_sas_port_refresh(ioc);
				leapioraid_scsihost_update_vphys_after_reset(ioc);
			}
			leapioraid_scsihost_prep_device_scan(ioc);
			leapioraid_scsihost_create_enclosure_list_after_reset(ioc);
			leapioraid_scsihost_search_responding_sas_devices(ioc);
			leapioraid_scsihost_search_responding_raid_devices(ioc);
			leapioraid_scsihost_search_responding_expanders(ioc);
			leapioraid_scsihost_error_recovery_delete_devices(ioc);
		}
		break;
	}
}

/*
 * Worker-thread dispatcher for one queued firmware event; drops the
 * event's reference and clears ioc->current_event when done.
 */
static void
leapioraid_fw_work(struct LEAPIORAID_ADAPTER *ioc,
		   struct leapioraid_fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	leapioraid_scsihost_fw_event_del_from_list(ioc, fw_event);
	if (ioc->remove_host || ioc->pci_error_recovery) {
		leapioraid_fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}
	switch (fw_event->event) {
	case LEAPIORAID_REMOVE_UNRESPONDING_DEVICES:
		/* wait for any host recovery to finish first */
		while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) {
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		leapioraid_scsihost_remove_unresponding_devices(ioc);
		leapioraid_scsihost_del_dirty_vphy(ioc);
		leapioraid_scsihost_del_dirty_port_entries(ioc);
		leapioraid_scsihost_update_device_qdepth(ioc);
		leapioraid_scsihost_scan_for_devices_after_reset(ioc);
		if (ioc->is_driver_loading)
			leapioraid_scsihost_complete_devices_scanning(ioc);
		break;
	case LEAPIORAID_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		dewtprintk(ioc, pr_info(
			"%s port enable: complete from worker thread\n",
			ioc->name));
		break;
	case LEAPIORAID_TURN_ON_PFA_LED:
		leapioraid_scsihost_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* non-zero return asks for a delayed (1s) requeue */
		if (leapioraid_scsihost_sas_topology_change_event(ioc, fw_event)) {
			leapioraid_scsihost_fw_event_requeue(ioc, fw_event, 1000);
			ioc->current_event = NULL;
			return;
		}
		break;
	case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
			leapioraid_scsihost_sas_device_status_change_event_debug(
				ioc,
				(struct LeapioraidEventDataSasDeviceStatusChange_t *)
				fw_event->event_data);
		break;
	case LEAPIORAID_EVENT_SAS_DISCOVERY:
		leapioraid_scsihost_sas_discovery_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		leapioraid_scsihost_sas_device_discovery_error_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
		leapioraid_scsihost_sas_broadcast_primitive_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		leapioraid_scsihost_sas_enclosure_dev_status_change_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		leapioraid_scsihost_sas_ir_config_change_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_IR_VOLUME:
		leapioraid_scsihost_sas_ir_volume_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
		leapioraid_scsihost_sas_ir_physical_disk_event(
			ioc, fw_event);
		break;
	case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
		leapioraid_scsihost_sas_ir_operation_status_event(
			ioc, fw_event);
		break;
	default:
		break;
	}
out:
	leapioraid_fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}

/* Work-queue entry point for immediately scheduled firmware events. */
static void
leapioraid_firmware_event_work(struct work_struct *work)
{
	struct leapioraid_fw_event_work *fw_event = container_of(work,
			struct leapioraid_fw_event_work,
			work);

	leapioraid_fw_work(fw_event->ioc, fw_event);
}

/* Work-queue entry point for delayed (requeued) firmware events. */
static void
leapioraid_firmware_event_work_delayed(struct work_struct *work)
{
	struct leapioraid_fw_event_work *fw_event = container_of(work,
			struct leapioraid_fw_event_work,
			delayed_work.work);

	leapioraid_fw_work(fw_event->ioc, fw_event);
}

/*
 * Firmware event callback (interrupt context): filter/pre-process the
 * event, then copy its data into a leapioraid_fw_event_work item and
 * queue it for the worker thread.  Always returns 1 so the reply frame
 * is released.  @msix_index is unused here.
 */
u8
leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc,
				   u8 msix_index, u32 reply)
{
	struct leapioraid_fw_event_work *fw_event;
	struct LeapioraidEventNotificationRep_t *mpi_reply;
	u16 event;
	u16 sz;

	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
		       __FILE__, __LINE__, __func__);
		return 1;
	}
	event = le16_to_cpu(mpi_reply->Event);
	switch (event) {
	case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		struct LeapioraidEventDataSasBroadcastPrimitive_t *baen_data =
		    (struct LeapioraidEventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;
		if (baen_data->Primitive !=
		    LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;
		/* coalesce broadcast AENs while one is being processed */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		}
		ioc->broadcast_aen_busy = 1;
		break;
	}
	case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		leapioraid_scsihost_check_topo_delete_events(
			ioc,
			(struct LeapioraidEventDataSasTopoChangeList_t *)
			mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		leapioraid_scsihost_check_ir_config_unhide_events(
			ioc,
			(struct LeapioraidEventDataIrCfgChangeList_t *)
			mpi_reply->EventData);
		break;
	case LEAPIORAID_EVENT_IR_VOLUME:
		leapioraid_scsihost_check_volume_delete_events(
			ioc,
			(struct LeapioraidEventDataIrVol_t *)
			mpi_reply->EventData);
		break;
	case LEAPIORAID_EVENT_LOG_ENTRY_ADDED:
		fallthrough;
	case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE:
		leapioraid_scsihost_sas_device_status_change_event(
			ioc,
			(struct LeapioraidEventDataSasDeviceStatusChange_t *)
			mpi_reply->EventData);
		break;
	case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
	case LEAPIORAID_EVENT_SAS_DISCOVERY:
	case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
		break;
	default:
		/* unrecognized events are dropped, not queued */
		return 1;
	}
	fw_event = leapioraid_alloc_fw_event_work(0);
	if (!fw_event) {
		pr_err("%s failure at %s:%d/%s()!\n",
		       ioc->name, __FILE__, __LINE__, __func__);
		return 1;
	}
	/* EventDataLength is in 32-bit words */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
	if (!fw_event->event_data) {
		pr_err("%s failure at %s:%d/%s()!\n",
		       ioc->name, __FILE__, __LINE__, __func__);
		leapioraid_fw_event_work_put(fw_event);
		return 1;
	}
	if (event == LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST) {
		struct LeapioraidEventDataSasTopoChangeList_t *topo_event_data =
		    (struct LeapioraidEventDataSasTopoChangeList_t *)
		    mpi_reply->EventData;
		/* one retry counter per topology entry */
		fw_event->retries = kzalloc(topo_event_data->NumEntries,
					    GFP_ATOMIC);
		if (!fw_event->retries) {
			kfree(fw_event->event_data);
			leapioraid_fw_event_work_put(fw_event);
			return 1;
		}
	}
	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	leapioraid_scsihost_fw_event_add(ioc, fw_event);
	leapioraid_fw_event_work_put(fw_event);
	return 1;
}

/*
 * Remove an expander node: tear down all child end devices/expanders
 * on its ports, then its own transport port, and free the node.
 * Bails out early (without freeing) if host recovery starts.
 */
static void
leapioraid_scsihost_expander_node_remove(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_raid_sas_node *sas_expander)
{
	struct leapioraid_sas_port *leapioraid_port, *next;
	unsigned long flags;
	int port_id;

	list_for_each_entry_safe(leapioraid_port, next,
				 &sas_expander->sas_port_list, port_list) {
		if (ioc->shost_recovery)
			return;
		if (leapioraid_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			leapioraid_device_remove_by_sas_address(ioc,
				leapioraid_port->remote_identify.sas_address,
				leapioraid_port->hba_port);
		else if (leapioraid_port->remote_identify.device_type ==
			 SAS_EDGE_EXPANDER_DEVICE
			 || leapioraid_port->remote_identify.device_type ==
			 SAS_FANOUT_EXPANDER_DEVICE)
			leapioraid_expander_remove(ioc,
				leapioraid_port->remote_identify.sas_address,
				leapioraid_port->hba_port);
	}
	port_id = sas_expander->port->port_id;
	leapioraid_transport_port_remove(ioc, sas_expander->sas_address,
					 sas_expander->sas_address_parent,
					 sas_expander->port);
	pr_info(
		"%s expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		ioc->name,
		sas_expander->handle,
		(unsigned long long)sas_expander->sas_address,
		port_id);
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/*
 * Send a RAID action "IR shutdown" request to the firmware so volumes
 * are flushed before the host goes away; waits up to 10s for the reply.
 */
static void
leapioraid_scsihost_ir_shutdown(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidRaidActionReq_t *mpi_request;
	struct LeapioraidRaidActionRep_t *mpi_reply;
	u16 smid;

	if (!ioc->ir_firmware)
		return;

	if (list_empty(&ioc->raid_device_list))
		return;
	if
	    (leapioraid_base_pci_device_is_unplugged(ioc))
		return;
	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
		pr_err("%s %s: scsih_cmd in use\n",
		       ioc->name, __func__);
		goto out;
	}
	ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
	smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		pr_err("%s %s: failed obtaining a smid\n",
		       ioc->name, __func__);
		ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
		goto out;
	}
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION;
	/* NOTE(review): 0x20 is presumably the IR-shutdown action code -
	 * confirm against the firmware interface spec */
	mpi_request->Action = 0x20;
	if (!ioc->warpdrive_msg)
		pr_info("%s IR shutdown (sending)\n",
			ioc->name);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ);
	if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
		pr_err("%s %s: timeout\n",
		       ioc->name, __func__);
		goto out;
	}
	if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->warpdrive_msg)
			pr_info(
				"%s IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
				le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/*
 * Fetch the Scsi_Host and adapter private data hanging off @pdev.
 * Returns 0 on success, -ENXIO when either pointer is missing.
 */
static int
leapioraid_scsihost_get_shost_and_ioc(struct pci_dev *pdev,
				      struct Scsi_Host **shost,
				      struct LEAPIORAID_ADAPTER **ioc)
{
	*shost = pci_get_drvdata(pdev);
	if (*shost == NULL) {
		dev_err(&pdev->dev, "pdev's driver data is null\n");
		return -ENXIO;
	}
	*ioc = leapioraid_shost_private(*shost);
	if (*ioc == NULL) {
		dev_err(&pdev->dev, "shost's private data is null\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * PCI .remove callback: drain firmware events and outstanding commands,
 * shut down IR, tear down all attached devices/ports/vphys, then detach
 * the adapter and drop the host reference.
 */
static void
leapioraid_scsihost_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = NULL;
	struct LEAPIORAID_ADAPTER *ioc = NULL;
	struct leapioraid_sas_port *leapioraid_port, *next_port;
	struct leapioraid_raid_device *raid_device, *next;
	struct LEAPIORAID_TARGET *sas_target_priv_data;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct leapioraid_hba_port *port, *port_next;
	struct leapioraid_virtual_phy *vphy, *vphy_next;
	struct LeapioraidCfgRep_t mpi_reply;

	if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}

	/* wait for initial discovery to settle before tearing down */
	while (ioc->is_driver_loading)
		ssleep(1);

	ioc->remove_host = 1;
	leapioraid_wait_for_commands_to_complete(ioc);
	spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
	if (leapioraid_base_pci_device_is_unplugged(ioc)) {
		/* surprise removal: nothing can reach the hw any more */
		leapioraid_base_pause_mq_polling(ioc);
		leapioraid_scsihost_flush_running_cmds(ioc);
	}
	leapioraid_scsihost_fw_event_cleanup_queue(ioc);
	spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
	/* detach the event workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
				      &ioc->ioc_pg1_copy);
	leapioraid_scsihost_ir_shutdown(ioc);
	sas_remove_host(shost);
	scsi_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
				 list) {
		if (raid_device->starget) {
			sas_target_priv_data = raid_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n",
			ioc->name, raid_device->handle,
			(unsigned long long)raid_device->wwid);
		leapioraid_scsihost_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(leapioraid_port, next_port,
				 &ioc->sas_hba.sas_port_list, port_list) {
		if (leapioraid_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			leapioraid_device_remove_by_sas_address(ioc,
				leapioraid_port->remote_identify.sas_address,
				leapioraid_port->hba_port);
		else if (leapioraid_port->remote_identify.device_type ==
			 SAS_EDGE_EXPANDER_DEVICE
			 || leapioraid_port->remote_identify.device_type ==
			 SAS_FANOUT_EXPANDER_DEVICE)
			leapioraid_expander_remove(ioc,
				leapioraid_port->remote_identify.sas_address,
				leapioraid_port->hba_port);
	}
	/* free the hba port table including any virtual phys */
	list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
		if (port->vphys_mask) {
			list_for_each_entry_safe(vphy, vphy_next,
						 &port->vphys_list, list) {
				list_del(&vphy->list);
				kfree(vphy);
			}
		}
		list_del(&port->list);
		kfree(port);
	}
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}
	leapioraid_base_detach(ioc);
	spin_lock(&leapioraid_gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&leapioraid_gioc_lock);
	scsi_host_put(shost);
}

/*
 * PCI .shutdown callback: quiesce firmware events, flush IR, soft-reset
 * the IOC and release interrupts (no device teardown on shutdown).
 */
static void
leapioraid_scsihost_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = NULL;
	struct LEAPIORAID_ADAPTER *ioc = NULL;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct LeapioraidCfgRep_t mpi_reply;

	if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
		dev_err(&pdev->dev, "unable to shutdown device\n");
		return;
	}
	ioc->remove_host = 1;
	leapioraid_wait_for_commands_to_complete(ioc);
	leapioraid_scsihost_fw_event_cleanup_queue(ioc);
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
				      &ioc->ioc_pg1_copy);
	leapioraid_scsihost_ir_shutdown(ioc);
	leapioraid_base_mask_interrupts(ioc);
	ioc->shost_recovery = 1;
	leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	leapioraid_base_free_irq(ioc);
	leapioraid_base_disable_msix(ioc);
}

static
void +leapioraid_scsihost_probe_boot_devices(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 channel; + void *device; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + struct leapioraid_hba_port *port; + u8 protection_mask; + + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + channel = ioc->req_boot_device.channel; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + channel = ioc->req_alt_boot_device.channel; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + channel = ioc->current_boot_device.channel; + } + if (!device) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + if (raid_device->starget) + return; + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, + protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + sas_device = device; + if (sas_device->starget) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + port = sas_device->port; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!port) + return; + + if (ioc->hide_drives) + return; + + if (!leapioraid_transport_port_add(ioc, handle, + sas_address_parent, port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if 
(!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_address, + sas_address_parent, + port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } + } +} + +static void +leapioraid_scsihost_probe_raid(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } +} + +static +struct leapioraid_sas_device *leapioraid_get_next_sas_device( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct leapioraid_sas_device, list); + leapioraid_sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static void +leapioraid_sas_device_make_active(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_probe_sas(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device; + + while ((sas_device = leapioraid_get_next_sas_device(ioc))) { + if (ioc->hide_drives) { + leapioraid_sas_device_make_active(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + 
sas_device->sas_address_parent,
+						   sas_device->port)) {
+			leapioraid_scsihost_sas_device_remove(ioc, sas_device);
+			leapioraid_sas_device_put(sas_device);
+			continue;
+		} else if (!sas_device->starget) {
+			if (!ioc->is_driver_loading) {
+				leapioraid_transport_port_remove(ioc,
+					sas_device->sas_address,
+					sas_device->sas_address_parent,
+					sas_device->port);
+				leapioraid_scsihost_sas_device_remove(ioc, sas_device);
+				leapioraid_sas_device_put(sas_device);
+				continue;
+			}
+		}
+		leapioraid_sas_device_make_active(ioc, sas_device);
+		leapioraid_sas_device_put(sas_device);
+	}
+}
+
+/*
+ * Probe all discovered devices.  The requested/alternate/current boot
+ * device is handled first; with IR firmware, the volume-mapping mode from
+ * IOC page 8 decides whether RAID volumes or bare SAS devices are exposed
+ * to the midlayer first.
+ */
+static void
+leapioraid_scsihost_probe_devices(struct LEAPIORAID_ADAPTER *ioc)
+{
+	u16 volume_mapping_flags;
+
+	if (!(ioc->facts.ProtocolFlags
+	      & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+		return;
+	leapioraid_scsihost_probe_boot_devices(ioc);
+
+	if (ioc->ir_firmware) {
+		volume_mapping_flags =
+		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+		    LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+		/* LOW mapping: presumably volumes occupy the low target IDs
+		 * and therefore get added first - confirm against the IOC
+		 * page 8 definition.
+		 */
+		if (volume_mapping_flags ==
+		    LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+			leapioraid_scsihost_probe_raid(ioc);
+			leapioraid_scsihost_probe_sas(ioc);
+		} else {
+			leapioraid_scsihost_probe_sas(ioc);
+			leapioraid_scsihost_probe_raid(ioc);
+		}
+	} else {
+		leapioraid_scsihost_probe_sas(ioc);
+	}
+}
+
+/*
+ * scsi_host_template.scan_start: kick off asynchronous port enable;
+ * progress is polled by leapioraid_scsihost_scan_finished().
+ */
+static void
+leapioraid_scsihost_scan_start(struct Scsi_Host *shost)
+{
+	struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost);
+	int rc;
+
+	if (disable_discovery > 0)
+		return;
+	ioc->start_scan = 1;
+	rc = leapioraid_port_enable(ioc);
+	if (rc != 0)
+		pr_info("%s port enable: FAILED\n",
+			ioc->name);
+}
+
+/*
+ * Invoked once port enable completes: run any deferred device probing,
+ * start the watchdog, and mark the driver as fully loaded.
+ */
+void
+leapioraid_scsihost_complete_devices_scanning(struct LEAPIORAID_ADAPTER *ioc)
+{
+	if (ioc->wait_for_discovery_to_complete) {
+		ioc->wait_for_discovery_to_complete = 0;
+		leapioraid_scsihost_probe_devices(ioc);
+	}
+	leapioraid_base_start_watchdog(ioc);
+	ioc->is_driver_loading = 0;
+}
+
+static int
+leapioraid_scsihost_scan_finished(
+	struct Scsi_Host *shost, unsigned long
time)
+{
+	struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost);
+	u32 ioc_state;
+	int issue_hard_reset = 0;
+
+	/*
+	 * scan_finished: polled by the SCSI midlayer after scan_start.
+	 * Returns 0 while port enable is still in flight, 1 once scanning
+	 * is complete (or has been abandoned).
+	 */
+	if (disable_discovery > 0) {
+		ioc->is_driver_loading = 0;
+		ioc->wait_for_discovery_to_complete = 0;
+		goto out;
+	}
+	/* Allow port enable at most 300 seconds. */
+	if (time >= (300 * HZ)) {
+		ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
+		pr_info("%s port enable: FAILED with timeout (timeout=300s)\n",
+			ioc->name);
+		ioc->is_driver_loading = 0;
+		goto out;
+	}
+	/* Still scanning: keep polling unless the IOC has faulted or is
+	 * taking a coredump, in which case a reset is issued below.
+	 */
+	if (ioc->start_scan) {
+		ioc_state = leapioraid_base_get_iocstate(ioc, 0);
+		if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
+		    LEAPIORAID_IOC_STATE_FAULT) {
+			leapioraid_print_fault_code(ioc,
+						    ioc_state &
+						    LEAPIORAID_DOORBELL_DATA_MASK);
+			issue_hard_reset = 1;
+			goto out;
+		} else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
+			   LEAPIORAID_IOC_STATE_COREDUMP) {
+			leapioraid_base_coredump_info(ioc,
+						      ioc_state &
+						      LEAPIORAID_DOORBELL_DATA_MASK);
+			leapioraid_base_wait_for_coredump_completion(ioc,
+								     __func__);
+			issue_hard_reset = 1;
+			goto out;
+		}
+		return 0;
+	}
+	if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) {
+		pr_err("%s port enable: aborted due to diag reset\n",
+		       ioc->name);
+		ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
+		goto out;
+	}
+	if (ioc->start_scan_failed) {
+		pr_info("%s port enable: FAILED with (ioc_status=0x%08x)\n",
+			ioc->name, ioc->start_scan_failed);
+		ioc->is_driver_loading = 0;
+		ioc->wait_for_discovery_to_complete = 0;
+		ioc->remove_host = 1;
+		goto out;
+	}
+	pr_info("%s port enable: SUCCESS\n", ioc->name);
+	ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	leapioraid_scsihost_complete_devices_scanning(ioc);
+out:
+	if (issue_hard_reset) {
+		ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
+		if (leapioraid_base_hard_reset_handler(ioc, SOFT_RESET))
+			ioc->is_driver_loading = 0;
+	}
+	return 1;
+}
+
+/*
+ * NOTE(review): SCSIH_MAP_QUEUE presumably expands to the declaration of
+ * leapioraid_scsihost_map_queues referenced by the host template below -
+ * confirm the macro definition in the driver headers.
+ */
+SCSIH_MAP_QUEUE(struct Scsi_Host *shost)
+{
+	struct LEAPIORAID_ADAPTER *ioc =
+	    (struct LEAPIORAID_ADAPTER *)shost->hostdata;
+	struct blk_mq_queue_map *map;
+	int i,
qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; + + if (shost->nr_hw_queues == 1) + return; + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + qoff += map->nr_queues; + } +} + +static struct scsi_host_template leapioraid_driver_template = { + .module = THIS_MODULE, + .name = "LEAPIO RAID Host", + .proc_name = LEAPIORAID_DRIVER_NAME, + .queuecommand = leapioraid_scsihost_qcmd, + .target_alloc = leapioraid_scsihost_target_alloc, + .slave_alloc = leapioraid_scsihost_slave_alloc, + .slave_configure = leapioraid_scsihost_slave_configure, + .target_destroy = leapioraid_scsihost_target_destroy, + .slave_destroy = leapioraid_scsihost_slave_destroy, + .scan_finished = leapioraid_scsihost_scan_finished, + .scan_start = leapioraid_scsihost_scan_start, + .change_queue_depth = leapioraid_scsihost_change_queue_depth, + .eh_abort_handler = leapioraid_scsihost_abort, + .eh_device_reset_handler = leapioraid_scsihost_dev_reset, + .eh_target_reset_handler = leapioraid_scsihost_target_reset, + .eh_host_reset_handler = leapioraid_scsihost_host_reset, + .bios_param = leapioraid_scsihost_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = LEAPIORAID_SG_DEPTH, + .max_sectors = 128, + .max_segment_size = 0xffffffff, + .cmd_per_lun = 128, + .shost_groups = leapioraid_host_groups, + .sdev_groups = leapioraid_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct leapioraid_scsiio_tracker), + .map_queues = 
leapioraid_scsihost_map_queues, + .mq_poll = leapioraid_blk_mq_poll, +}; + +static struct raid_function_template leapioraid_raid_functions = { + .cookie = &leapioraid_driver_template, + .is_raid = leapioraid_scsihost_is_raid, + .get_resync = leapioraid_scsihost_get_resync, + .get_state = leapioraid_scsihost_get_state, +}; + +static int +leapioraid_scsihost_probe( + struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct LEAPIORAID_ADAPTER *ioc; + struct Scsi_Host *shost = NULL; + int rv; + + shost = scsi_host_alloc(&leapioraid_driver_template, + sizeof(struct LEAPIORAID_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct LEAPIORAID_ADAPTER)); + ioc->id = leapioraid_ids++; + sprintf(ioc->driver_name, "%s", LEAPIORAID_DRIVER_NAME); + + ioc->combined_reply_queue = 1; + ioc->nc_reply_index_count = 16; + ioc->multipath_on_hba = 1; + + ioc = leapioraid_shost_private(shost); + INIT_LIST_HEAD(&ioc->list); + spin_lock(&leapioraid_gioc_lock); + list_add_tail(&ioc->list, &leapioraid_ioc_list); + spin_unlock(&leapioraid_gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->ctl_tm_cb_idx = ctl_tm_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_tr_internal_cb_idx = tm_tr_internal_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = + &leapioraid_scsihost_flush_running_cmds; + ioc->open_pcie_trace = open_pcie_trace; + ioc->enable_sdev_max_qd = 0; + ioc->max_shutdown_latency = 6; + ioc->drv_support_bitmap |= 0x00000001; + ioc->drv_support_bitmap |= 0x00000002; + + 
mutex_init(&ioc->reset_in_progress_mutex); + mutex_init(&ioc->hostdiag_unlock_mutex); + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->scsih_q_internal_lock); + spin_lock_init(&ioc->hba_hot_unplug_lock); + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->port_table_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->enclosure_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->delayed_internal_tm_list); + INIT_LIST_HEAD(&ioc->scsih_q_intenal_cmds); + INIT_LIST_HEAD(&ioc->reply_queue_list); + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + shost->max_cmd_len = 32; + shost->max_lun = 8; + shost->transportt = leapioraid_transport_template; + shost->unique_id = ioc->id; + + ioc->drv_internal_flags |= LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ; + + ioc->disable_eedp_support = 1; + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%u", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = + alloc_ordered_workqueue(ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + shost->host_tagset = 0; + ioc->is_driver_loading = 1; + if ((leapioraid_base_attach(ioc))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto 
out_attach_fail; + } + ioc->hide_drives = 0; + + shost->nr_hw_queues = 1; + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + + return 0; +out_add_shost_fail: + leapioraid_base_detach(ioc); +out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); +out_thread_fail: + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + scsi_host_put(shost); + return rv; +} + +#ifdef CONFIG_PM +static int +leapioraid_scsihost_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state; + int rc; + + rc = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) { + dev_err(&pdev->dev, "unable to suspend device\n"); + return rc; + } + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + scsi_block_requests(shost); + device_state = pci_choose_state(pdev, state); + leapioraid_scsihost_ir_shutdown(ioc); + pr_info("%s pdev=0x%p, slot=%s, entering operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_save_state(pdev); + leapioraid_base_free_resources(ioc); + pci_set_power_state(pdev, device_state); + return 0; +} + +static int +leapioraid_scsihost_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state = pdev->current_state; + int r; + + r = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) { + dev_err(&pdev->dev, "unable to resume device\n"); + return r; + } + pr_info("%s pdev=0x%p, slot=%s, previous operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_set_power_state(pdev, PCI_D0); + 
pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + ioc->pdev = pdev; + r = leapioraid_base_map_resources(ioc); + if (r) + return r; + pr_err("%s issuing hard reset as part of OS resume\n", + ioc->name); + leapioraid_base_hard_reset_handler(ioc, SOFT_RESET); + scsi_unblock_requests(shost); + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + return 0; +} +#endif + +static pci_ers_result_t +leapioraid_scsihost_pci_error_detected( + struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "device unavailable\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: detected callback, state(%d)!!\n", + ioc->name, state); + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + ioc->pci_error_recovery = 1; + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t +leapioraid_scsihost_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + int rc; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to perform slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: slot reset callback!!\n", + ioc->name); + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = 
leapioraid_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + pr_info("%s issuing hard reset as part of PCI slot reset\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_info("%s hard reset: %s\n", + ioc->name, (rc == 0) ? "success" : "failed"); + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +static void +leapioraid_scsihost_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to resume device\n"); + return; + } + pr_err("%s PCI error: resume callback!!\n", + ioc->name); + + pci_aer_clear_nonfatal_status(pdev); + + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +static pci_ers_result_t +leapioraid_scsihost_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to enable mmio\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pr_err("%s: PCI error: mmio enabled callback!!!\n", + ioc->name); + return PCI_ERS_RESULT_RECOVERED; +} + +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev) +{ + u8 ncq_prio_supp = 0; + + struct scsi_vpd *vpd; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (!vpd || vpd->len < 214) + goto out; + ncq_prio_supp = (vpd->data[213] >> 4) & 1; +out: + rcu_read_unlock(); + return ncq_prio_supp; +} + +static const struct pci_device_id leapioraid_pci_table[] = { + { 0x1556, 0x1111, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, leapioraid_pci_table); +static 
struct pci_error_handlers leapioraid_err_handler = {
+	.error_detected = leapioraid_scsihost_pci_error_detected,
+	.mmio_enabled = leapioraid_scsihost_pci_mmio_enabled,
+	.slot_reset = leapioraid_scsihost_pci_slot_reset,
+	.resume = leapioraid_scsihost_pci_resume,
+};
+
+static struct pci_driver leapioraid_driver = {
+	.name = LEAPIORAID_DRIVER_NAME,
+	.id_table = leapioraid_pci_table,
+	.probe = leapioraid_scsihost_probe,
+	.remove = leapioraid_scsihost_remove,
+	.shutdown = leapioraid_scsihost_shutdown,
+	.err_handler = &leapioraid_err_handler,
+#ifdef CONFIG_PM
+	.suspend = leapioraid_scsihost_suspend,
+	.resume = leapioraid_scsihost_resume,
+#endif
+};
+
+/*
+ * Register one reply callback per command class with the base layer.
+ * The returned indices are global and copied into each IOC at probe
+ * time.  Always returns 0.
+ */
+static int
+leapioraid_scsihost_init(void)
+{
+	leapioraid_ids = 0;
+	leapioraid_base_initialize_callback_handler();
+
+	scsi_io_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_io_done);
+	tm_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_tm_done);
+	base_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_base_done);
+	port_enable_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_port_enable_done);
+	transport_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_transport_done);
+	scsih_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_done);
+	config_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_config_done);
+	ctl_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_ctl_done);
+	ctl_tm_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_ctl_tm_done);
+	tm_tr_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_tm_tr_complete);
+	tm_tr_volume_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_tm_volume_tr_complete);
+	tm_tr_internal_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_tm_internal_tr_complete);
+	tm_sas_control_cb_idx =
+	    leapioraid_base_register_callback_handler(
+		leapioraid_scsihost_sas_control_complete);
+
+	return 0;
+}
+
+/* Release all callback handlers and the raid/transport templates. */
+static void
+leapioraid_scsihost_exit(void)
+{
+	leapioraid_base_release_callback_handler(scsi_io_cb_idx);
+	leapioraid_base_release_callback_handler(tm_cb_idx);
+	leapioraid_base_release_callback_handler(base_cb_idx);
+	leapioraid_base_release_callback_handler(port_enable_cb_idx);
+	leapioraid_base_release_callback_handler(transport_cb_idx);
+	leapioraid_base_release_callback_handler(scsih_cb_idx);
+	leapioraid_base_release_callback_handler(config_cb_idx);
+	leapioraid_base_release_callback_handler(ctl_cb_idx);
+	leapioraid_base_release_callback_handler(ctl_tm_cb_idx);
+	leapioraid_base_release_callback_handler(tm_tr_cb_idx);
+	leapioraid_base_release_callback_handler(tm_tr_volume_cb_idx);
+	leapioraid_base_release_callback_handler(tm_tr_internal_cb_idx);
+	leapioraid_base_release_callback_handler(tm_sas_control_cb_idx);
+
+	raid_class_release(leapioraid_raid_template);
+	sas_release_transport(leapioraid_transport_template);
+}
+
+/*
+ * Module load: attach SAS transport and raid-class templates, register
+ * reply callbacks, initialize the ctl (ioctl) interface, then register
+ * the PCI driver.
+ */
+static int __init leapioraid_init(void)
+{
+	int error;
+
+	pr_info("%s version %s loaded\n", LEAPIORAID_DRIVER_NAME,
+		LEAPIORAID_DRIVER_VERSION);
+	leapioraid_transport_template =
+	    sas_attach_transport(&leapioraid_transport_functions);
+
+	if (!leapioraid_transport_template)
+		return -ENODEV;
+
+	leapioraid_raid_template =
+	    raid_class_attach(&leapioraid_raid_functions);
+	if (!leapioraid_raid_template) {
+		sas_release_transport(leapioraid_transport_template);
+		return -ENODEV;
+	}
+
+	error = leapioraid_scsihost_init();
+	if (error) {
+		leapioraid_scsihost_exit();
+		return error;
+	}
+	leapioraid_ctl_init();
+	/* NOTE(review): if pci_register_driver() fails here,
+	 * leapioraid_ctl_exit() is never called to undo
+	 * leapioraid_ctl_init() - confirm and fix the error path.
+	 */
+	error = pci_register_driver(&leapioraid_driver);
+	if (error)
+		leapioraid_scsihost_exit();
+	return error;
+}
+
+/*
+ * Module unload: reverse of leapioraid_init.
+ * NOTE(review): the "leapioraid_ids version" banner below looks like a
+ * pasted identifier; the driver name was probably intended.
+ */
+static void __exit leapioraid_exit(void)
+{
+	pr_info("leapioraid_ids version %s unloading\n",
+		LEAPIORAID_DRIVER_VERSION);
+	leapioraid_ctl_exit();
+	pci_unregister_driver(&leapioraid_driver);
+
leapioraid_scsihost_exit(); +} + +module_init(leapioraid_init); +module_exit(leapioraid_exit); diff --git a/drivers/scsi/leapioraid/leapioraid_transport.c b/drivers/scsi/leapioraid/leapioraid_transport.c new file mode 100644 index 000000000000..a3a446e994ea --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_transport.c @@ -0,0 +1,1926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" + +static +struct leapioraid_raid_sas_node *leapioraid_transport_sas_node_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return leapioraid_scsihost_expander_find_by_sas_address(ioc, + sas_address, + port); +} + +static inline u8 +leapioraid_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct leapioraid_hba_port *port = phy->hostdata; + + if (port) + port_id = port->port_id; + else + BUG(); + return port_id; +} + +static int +leapioraid_transport_find_parent_node( + struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + unsigned long flags; + struct leapioraid_hba_port *port = phy->hostdata; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (leapioraid_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + port) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; +} + +static u8 +leapioraid_transport_get_port_id_by_rphy(struct LEAPIORAID_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct 
leapioraid_sas_device *sas_device;
+	unsigned long flags;
+	u8 port_id = 0xFF;
+
+	/* 0xFF means "no port found" for the caller. */
+	if (!rphy)
+		return port_id;
+	if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+	    rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+		spin_lock_irqsave(&ioc->sas_node_lock, flags);
+		list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+			if (sas_expander->rphy == rphy) {
+				port_id = sas_expander->port->port_id;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+	} else if (rphy->identify.device_type == SAS_END_DEVICE) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = __leapioraid_get_sdev_by_addr_and_rphy(
+			ioc, rphy->identify.sas_address, rphy);
+		if (sas_device) {
+			port_id = sas_device->port->port_id;
+			leapioraid_sas_device_put(sas_device);
+		}
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	}
+	return port_id;
+}
+
+/*
+ * Translate the controller's negotiated link-rate encoding into the
+ * kernel's enum sas_linkrate.
+ */
+static enum sas_linkrate
+leapioraid_transport_convert_phy_link_rate(u8 link_rate)
+{
+	enum sas_linkrate rc;
+
+	switch (link_rate) {
+	case LEAPIORAID_SAS_NEG_LINK_RATE_1_5:
+		rc = SAS_LINK_RATE_1_5_GBPS;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_3_0:
+		rc = SAS_LINK_RATE_3_0_GBPS;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_6_0:
+		rc = SAS_LINK_RATE_6_0_GBPS;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_12_0:
+		rc = SAS_LINK_RATE_12_0_GBPS;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED:
+		rc = SAS_PHY_DISABLED;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+		rc = SAS_LINK_RATE_FAILED;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+		rc = SAS_SATA_PORT_SELECTOR;
+		break;
+	case LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+	/* default is grouped mid-list: any unrecognized code, plus the
+	 * remaining known codes below, maps to UNKNOWN.
+	 */
+	default:
+	case LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+	case LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+		rc = SAS_LINK_RATE_UNKNOWN;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Build a sas_identify from SAS Device Page 0 for @handle; used when
+ * registering phys/ports with the SAS transport layer.
+ */
+static int
+leapioraid_transport_set_identify(
+	struct LEAPIORAID_ADAPTER *ioc, u16 handle,
+	struct sas_identify *identify)
+{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if ((ioc->shost_recovery && !ioc->is_driver_loading) + || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", + ioc->name, handle, + ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + identify->phy_identifier = sas_device_pg0.PhyNum; + switch (device_info & LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST) + 
identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + return 0; +} + +u8 +leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->transport_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +struct leapioraid_rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +struct leapioraid_rep_manu_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +static int +leapioraid_transport_expander_report_manufacture( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_expander_device 
*edev, + u8 port_id) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_rep_manu_reply *manufacture_reply; + struct leapioraid_rep_manu_request *manufacture_request; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + data_out_sz = sizeof(struct leapioraid_rep_manu_request); + data_in_sz = sizeof(struct leapioraid_rep_manu_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + data_in_dma = data_out_dma + sizeof(struct leapioraid_rep_manu_request); + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = port_id; + 
mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + dtransportprintk(ioc, + pr_info("%s report_manufacture - send to sas_addr(0x%016llx)\n", + ioc->name, + (unsigned long long)sas_address)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, + pr_info("%s report_manufacture - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s report_manufacture - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_rep_manu_reply)) + goto out; + manufacture_reply = data_out + sizeof(struct leapioraid_rep_manu_request); + strscpy(edev->vendor_id, manufacture_reply->vendor_id, + sizeof(edev->vendor_id)); + strscpy(edev->product_id, manufacture_reply->product_id, + sizeof(edev->product_id)); + strscpy(edev->product_rev, manufacture_reply->product_rev, + sizeof(edev->product_rev)); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strscpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + sizeof(edev->component_vendor_id)); + tmp = (u8 *) &manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + 
} + } else + dtransportprintk(ioc, pr_err( + "%s report_manufacture - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} +#endif + +static void +leapioraid_transport_delete_port(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + struct leapioraid_hba_port *port = leapioraid_port->hba_port; + enum sas_device_type device_type = + leapioraid_port->remote_identify.device_type; + +#if defined(LEAPIORAID_WIDE_PORT_API) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); +#endif + ioc->logging_level |= LEAPIORAID_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + leapioraid_device_remove_by_sas_address(ioc, sas_address, port); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, sas_address, port); + ioc->logging_level &= ~LEAPIORAID_DEBUG_TRANSPORT; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +static void +leapioraid_transport_delete_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long)sas_address, leapioraid_phy->phy_id); + list_del(&leapioraid_phy->port_siblings); + leapioraid_port->num_phys--; + sas_port_delete_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 0; +} + +static void +leapioraid_transport_add_phy(struct LEAPIORAID_ADAPTER *ioc, + 
struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, leapioraid_phy->phy_id); + list_add_tail(&leapioraid_phy->port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + sas_port_add_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; +} + +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_port *leapioraid_port; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 1) + return; + if (!port) + return; + list_for_each_entry(leapioraid_port, &sas_node->sas_port_list, + port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if (leapioraid_port->hba_port != port) + continue; + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch == leapioraid_phy) + return; + } + leapioraid_transport_add_phy(ioc, leapioraid_port, leapioraid_phy); + return; + } +} +#endif + +void +leapioraid_transport_del_phy_from_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy) +{ + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 0) + return; + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch != leapioraid_phy) + continue; +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->num_phys == 1 + 
&& !ioc->shost_recovery) + leapioraid_transport_delete_port(ioc, leapioraid_port); + else + leapioraid_transport_delete_phy(ioc, leapioraid_port, + leapioraid_phy); +#else + leapioraid_transport_delete_port(ioc, leapioraid_port); +#endif + return; + } + } +} + +static void +leapioraid_transport_sanity_check( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, u64 sas_address, + struct leapioraid_hba_port *port) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address + || sas_node->phy[i].port != port) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + leapioraid_transport_del_phy_from_an_existing_port(ioc, + sas_node, + &sas_node->phy + [i]); + } +} + +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *hba_port) +{ + struct leapioraid_sas_phy *leapioraid_phy, *next; + struct leapioraid_sas_port *leapioraid_port; + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct sas_rphy *rphy; + struct leapioraid_sas_device *sas_device = NULL; + int i; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct sas_port *port; +#endif + struct leapioraid_virtual_phy *vphy = NULL; + + if (!hba_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + leapioraid_port = kzalloc(sizeof(struct leapioraid_sas_port), GFP_KERNEL); + if (!leapioraid_port) + return NULL; + INIT_LIST_HEAD(&leapioraid_port->port_list); + INIT_LIST_HEAD(&leapioraid_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address, + hba_port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_node) { + pr_err("%s %s: Could not find parent sas_address(0x%016llx)!\n", + ioc->name, + __func__, (unsigned long long)sas_address); + goto 
out_fail; + } + if ((leapioraid_transport_set_identify(ioc, handle, + &leapioraid_port->remote_identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_PHY_UNUSED) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + leapioraid_port->hba_port = hba_port; + leapioraid_transport_sanity_check(ioc, sas_node, + leapioraid_port->remote_identify.sas_address, + hba_port); + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + leapioraid_port->remote_identify.sas_address || + sas_node->phy[i].port != hba_port) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + vphy = leapioraid_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + } + } + if (!leapioraid_port->num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = leapioraid_get_sdev_by_addr(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + if (!sas_device) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } +#if defined(LEAPIORAID_WIDE_PORT_API) + if (!sas_node->parent_dev) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + port = sas_port_alloc_num(sas_node->parent_dev); + if ((sas_port_add(port))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, 
__LINE__, __func__); + goto out_fail; + } + list_for_each_entry(leapioraid_phy, &leapioraid_port->phy_list, + port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + sas_port_add_phy(port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; + leapioraid_phy->port = hba_port; + } + leapioraid_port->port = port; + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + sas_device->rphy = rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = sas_device->sas_address; + else + vphy->sas_address = sas_device->sas_address; + } + } else { + rphy = sas_expander_alloc(port, + leapioraid_port->remote_identify.device_type); + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + leapioraid_port->remote_identify.sas_address; + } +#else + leapioraid_phy = + list_entry(leapioraid_port->phy_list.next, struct leapioraid_sas_phy, + port_siblings); + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(leapioraid_phy->phy); + sas_device->rphy = rphy; + } else + rphy = sas_expander_alloc(leapioraid_phy->phy, + leapioraid_port->remote_identify.device_type); +#endif + rphy->identify = leapioraid_port->remote_identify; + if ((sas_rphy_add(rphy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + leapioraid_sas_device_put(sas_device); + } + dev_info(&rphy->dev, + "%s: added: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, handle, (unsigned long long) + leapioraid_port->remote_identify.sas_address); + leapioraid_port->rphy = rphy; + 
spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&leapioraid_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER || + leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER) + leapioraid_transport_expander_report_manufacture(ioc, + leapioraid_port->remote_identify.sas_address, + rphy_to_expander_device + (rphy), + hba_port->port_id); +#endif + return leapioraid_port; +out_fail: + list_for_each_entry_safe(leapioraid_phy, next, + &leapioraid_port->phy_list, port_siblings) + list_del(&leapioraid_phy->port_siblings); + kfree(leapioraid_port); + return NULL; +} + +void +leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port) +{ + int i; + unsigned long flags; + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_raid_sas_node *sas_node; + u8 found = 0; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct leapioraid_sas_phy *leapioraid_phy, *next_phy; +#endif + struct leapioraid_hba_port *hba_port, *hba_port_next = NULL; + struct leapioraid_virtual_phy *vphy, *vphy_next = NULL; + + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address_parent, + port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if (leapioraid_port->hba_port != port) + continue; + found = 1; + list_del(&leapioraid_port->port_list); + goto out; + } +out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + if ((sas_node->handle <= 
ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + pr_err( + "%s remove vphy entry: %p of port:%p,\n\t\t" + "from %d port's vphys list\n", + ioc->name, + vphy, + port, + port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + if (!port->vphys_mask && !port->sas_address) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + port, + port->port_id); + list_del(&port->list); + kfree(port); + } + } + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + if (hba_port->sas_address != sas_address) + continue; + if (!port->vphys_mask) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else { + pr_err( + "%s clearing sas_address from hba_port entry: %p\n\t\t" + "port: %d from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + port->sas_address = 0; + } + break; + } + } + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) { + memset(&sas_node->phy[i].remote_identify, 0, + sizeof(struct sas_identify)); + sas_node->phy[i].hba_vphy = 0; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + list_for_each_entry_safe(leapioraid_phy, next_phy, + &leapioraid_port->phy_list, port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + leapioraid_phy->phy_belongs_to_port = 0; + if (!ioc->remove_host) + 
sas_port_delete_phy(leapioraid_port->port, + leapioraid_phy->phy); + list_del(&leapioraid_phy->port_siblings); + } + if (!ioc->remove_host) + sas_port_delete(leapioraid_port->port); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#else + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->rphy->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); + if (!ioc->remove_host) + sas_rphy_delete(leapioraid_port->rphy); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#endif + kfree(leapioraid_port); +} + +int +leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(phy_pg0.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + 
phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if !defined(LEAPIORAID_WIDE_PORT_API_PLUS) + phy->local_attached = 1; +#endif +#if !defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +int +leapioraid_transport_add_expander_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + 
leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if !defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +void +leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, + u8 link_rate, struct leapioraid_hba_port *port) +{ + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct leapioraid_sas_phy *leapioraid_phy; + struct leapioraid_hba_port *hba_port = NULL; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = 
leapioraid_transport_sas_node_find_by_sas_address(ioc, + sas_address, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + leapioraid_phy = &sas_node->phy[phy_number]; + leapioraid_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + leapioraid_transport_set_identify(ioc, handle, + &leapioraid_phy->remote_identify); +#if defined(LEAPIORAID_WIDE_PORT_API) + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << leapioraid_phy->phy_id); + } + } + leapioraid_transport_add_phy_to_an_existing_port(ioc, sas_node, + leapioraid_phy, + leapioraid_phy->remote_identify.sas_address, + port); +#endif + } else + memset(&leapioraid_phy->remote_identify, 0, sizeof(struct + sas_identify)); + if (leapioraid_phy->phy) + leapioraid_phy->phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate(link_rate); + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); +} + +static inline void *phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + + return leapioraid_shost_private(shost); +} + +static inline void *rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + + return leapioraid_shost_private(shost); +} + +struct leapioraid_phy_error_log_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u8 
reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +struct leapioraid_phy_error_log_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +static int +leapioraid_transport_get_expander_phy_error_log( + struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_error_log_request *phy_error_log_request; + struct leapioraid_phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_error_log_request) + + sizeof(struct leapioraid_phy_error_log_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + 
goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_error_log_request), + data_out_dma + sizeof(struct leapioraid_phy_error_log_request), + sizeof(struct leapioraid_phy_error_log_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info("%s phy_error_log - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_error_log - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if 
(le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_phy_error_log_reply)) + goto out; + phy_error_log_reply = data_out + + sizeof(struct leapioraid_phy_error_log_request); + dtransportprintk(ioc, pr_err( + "%s phy_error_log - function_result(%d)\n", + ioc->name, + phy_error_log_reply->function_result)); + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_error_log - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_get_linkerrors(struct sas_phy *phy) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasPhyP1_t phy_pg1; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_get_expander_phy_error_log(ioc, phy); + if ((leapioraid_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + 
phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +static int +leapioraid_transport_get_enclosure_identifier( + struct sas_rphy *rphy, u64 *identifier) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + leapioraid_sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + rc = sas_device->slot; + leapioraid_sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +struct leapioraid_phy_control_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +struct leapioraid_phy_control_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; +}; + +#define LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET (0x01) 
+#define LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET (0x02) +#define LEAPIORAID_SMP_PHY_CONTROL_DISABLE (0x03) +static int +leapioraid_transport_expander_phy_control( + struct LEAPIORAID_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_control_request *phy_control_request; + struct leapioraid_phy_control_reply *phy_control_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_control_request) + + sizeof(struct leapioraid_phy_control_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + 
phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_control_request), + data_out_dma + sizeof(struct leapioraid_phy_control_request), + sizeof(struct leapioraid_phy_control_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number, phy_operation)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info( + "%s phy_control - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_control - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + 
sizeof(struct leapioraid_phy_control_reply)) + goto out; + phy_control_reply = data_out + + sizeof(struct leapioraid_phy_control_request); + dtransportprintk(ioc, pr_err( + "%s phy_control - function_result(%d)\n", + ioc->name, + phy_control_reply->function_result)); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_control - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIoUnitControlRep_t mpi_reply; + struct LeapioraidSasIoUnitControlReq_t mpi_request; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (hard_reset == + 1) ? + LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + memset(&mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL; + mpi_request.Operation = hard_reset ? 
+ LEAPIORAID_SAS_OP_PHY_HARD_RESET : LEAPIORAID_SAS_OP_PHY_LINK_RESET; + mpi_request.PhyNum = phy->number; + if ((leapioraid_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + return 0; +} + +static int +leapioraid_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int rc = 0; + int i, discovery_active; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (enable == + 1) ? 
+ LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_DISABLE); + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys; i++) { + if (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { + pr_err( + "%s discovery is active on port = %d, phy = %d:\n\t\t" + "unable to enable/disable phys, try again later!\n", + ioc->name, + sas_iounit_pg0->PhyData[i].Port, + i); + discovery_active = 1; + } + } + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, 
__FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + leapioraid_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz); + if (enable) + leapioraid_transport_phy_reset(phy, 0); +out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +static int +leapioraid_transport_phy_speed( + struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return leapioraid_transport_expander_phy_control(ioc, phy, + 
LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + if (leapioraid_config_set_sas_iounit_pg1 + (ioc, &mpi_reply, sas_iounit_pg1, sz)) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + leapioraid_transport_phy_reset(phy, 0); + if (!leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } +out: + kfree(sas_iounit_pg1); + return rc; +} + +static int +leapioraid_transport_map_smp_buffer( + struct 
device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + return 0; +} + +static void +leapioraid_transport_unmap_smp_buffer( + struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +static void +leapioraid_transport_smp_handler( + struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + int rc; + u16 smid; + u32 ioc_state; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + u16 wait_state_count; + unsigned int reslen = 0; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + rc = -EFAULT; + goto job_done; + } + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + goto job_done; + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + rc = -EAGAIN; + goto job_done; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + 
job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + } + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + wait_state_count = 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto unmap_in; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info( + "%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto unmap_in; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_rphy( + ioc, rphy); + mpi_request->SASAddress = (rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); + dtransportprintk(ioc, pr_info( + "%s %s - sending smp request\n", ioc->name, + __func__)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s : timeout\n", __func__, ioc->name); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } + } + dtransportprintk(ioc, pr_info( + "%s %s - complete\n", ioc->name, __func__)); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID)) { + dtransportprintk(ioc, pr_info( + "%s %s - no reply\n", ioc->name, + __func__)); + rc = -ENXIO; + goto unmap_in; + } + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, + pr_info( + "%s %s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + if (addr_in) { + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + } + rc = 0; +unmap_in: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); +unmap_out: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); 
+job_done: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template leapioraid_transport_functions = { + .get_linkerrors = leapioraid_transport_get_linkerrors, + .get_enclosure_identifier = leapioraid_transport_get_enclosure_identifier, + .get_bay_identifier = leapioraid_transport_get_bay_identifier, + .phy_reset = leapioraid_transport_phy_reset, + .phy_enable = leapioraid_transport_phy_enable, + .set_phy_speed = leapioraid_transport_phy_speed, + .smp_handler = leapioraid_transport_smp_handler, +}; + +struct scsi_transport_template *leapioraid_transport_template; \ No newline at end of file