Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/dev/ic/mfi.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/dev/ic/mfi.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.36.8.2 retrieving revision 1.36.8.3 diff -u -p -r1.36.8.2 -r1.36.8.3 --- src/sys/dev/ic/mfi.c 2012/03/22 23:04:27 1.36.8.2 +++ src/sys/dev/ic/mfi.c 2012/10/24 03:19:19 1.36.8.3 @@ -1,5 +1,30 @@ -/* $NetBSD: mfi.c,v 1.36.8.2 2012/03/22 23:04:27 riz Exp $ */ +/* $NetBSD: mfi.c,v 1.36.8.3 2012/10/24 03:19:19 riz Exp $ */ /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */ + +/* + * Copyright (c) 2012 Manuel Bouyer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + /* * Copyright (c) 2006 Marco Peereboom * @@ -16,8 +41,39 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + /*- + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Copyright 1994-2009 The FreeBSD Project. + * All rights reserved. + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FREEBSD PROJECT OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY,OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation + * are those of the authors and should not be interpreted as representing + * official policies,either expressed or implied, of the FreeBSD Project. + */ + #include -__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.36.8.2 2012/03/22 23:04:27 riz Exp $"); +__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.36.8.3 2012/10/24 03:19:19 riz Exp $"); #include "bio.h" @@ -29,6 +85,7 @@ __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.36 #include #include #include +#include #include @@ -50,14 +107,15 @@ __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.36 #ifdef MFI_DEBUG uint32_t mfi_debug = 0 -/* | MFI_D_CMD */ +/* | MFI_D_CMD */ /* | MFI_D_INTR */ /* | MFI_D_MISC */ /* | MFI_D_DMA */ - | MFI_D_IOCTL +/* | MFI_D_IOCTL */ /* | MFI_D_RW */ /* | MFI_D_MEM */ /* | MFI_D_CCB */ +/* | MFI_D_SYNC */ ; #endif @@ -75,6 +133,12 @@ static void mfi_freemem(struct mfi_soft static int mfi_transition_firmware(struct mfi_softc *); static int mfi_initialize_firmware(struct mfi_softc *); static int mfi_get_info(struct mfi_softc *); +static int mfi_get_bbu(struct mfi_softc *, + struct mfi_bbu_status *); +/* return codes for mfi_get_bbu */ +#define MFI_BBU_GOOD 0 +#define MFI_BBU_BAD 1 +#define MFI_BBU_UNKNOWN 2 static uint32_t mfi_read(struct mfi_softc *, bus_size_t); static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t); static int mfi_poll(struct mfi_ccb *); @@ -82,11 +146,12 @@ static int mfi_create_sgl(struct mfi_cc /* commands */ static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *); -static int mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *, - uint32_t, uint32_t); -static void mfi_scsi_xs_done(struct mfi_ccb *); -static int mfi_mgmt_internal(struct mfi_softc *, - uint32_t, uint32_t, uint32_t, void *, uint8_t *); +static int mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *, + uint64_t, uint32_t); +static void mfi_scsi_ld_done(struct mfi_ccb *); +static void mfi_scsi_xs_done(struct mfi_ccb *, int, int); +static int mfi_mgmt_internal(struct mfi_softc *, uint32_t, + uint32_t, uint32_t, void *, uint8_t *, bool); static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *, uint32_t, uint32_t, uint32_t, void *, uint8_t *); static void mfi_mgmt_done(struct mfi_ccb *); @@ -108,6 +173,9 @@ static int mfi_destroy_sensors(struct m static void mfi_sensor_refresh(struct sysmon_envsys *, envsys_data_t *); #endif /* NBIO > 0 */ +static bool mfi_shutdown(device_t, int); +static bool mfi_suspend(device_t, const pmf_qual_t *); +static bool mfi_resume(device_t, const pmf_qual_t *); static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc); static void mfi_xscale_intr_ena(struct mfi_softc *sc); @@ -120,7 +188,8 @@ static const struct mfi_iop_ops mfi_iop_ mfi_xscale_intr_dis, mfi_xscale_intr_ena, mfi_xscale_intr, - mfi_xscale_post + mfi_xscale_post, + mfi_scsi_ld_io, }; static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc); @@ -134,7 +203,8 @@ static const struct mfi_iop_ops mfi_iop_ mfi_ppc_intr_dis, mfi_ppc_intr_ena, mfi_ppc_intr, - 
mfi_ppc_post + mfi_ppc_post, + mfi_scsi_ld_io, }; uint32_t mfi_gen2_fw_state(struct mfi_softc *sc); @@ -148,7 +218,8 @@ static const struct mfi_iop_ops mfi_iop_ mfi_gen2_intr_dis, mfi_gen2_intr_ena, mfi_gen2_intr, - mfi_gen2_post + mfi_gen2_post, + mfi_scsi_ld_io, }; u_int32_t mfi_skinny_fw_state(struct mfi_softc *); @@ -162,7 +233,33 @@ static const struct mfi_iop_ops mfi_iop_ mfi_skinny_intr_dis, mfi_skinny_intr_ena, mfi_skinny_intr, - mfi_skinny_post + mfi_skinny_post, + mfi_scsi_ld_io, +}; + +static int mfi_tbolt_init_desc_pool(struct mfi_softc *); +static int mfi_tbolt_init_MFI_queue(struct mfi_softc *); +static void mfi_tbolt_build_mpt_ccb(struct mfi_ccb *); +int mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *, + uint64_t, uint32_t); +static void mfi_tbolt_scsi_ld_done(struct mfi_ccb *); +static int mfi_tbolt_create_sgl(struct mfi_ccb *, int); +void mfi_tbolt_sync_map_info(struct work *, void *); +static void mfi_sync_map_complete(struct mfi_ccb *); + +u_int32_t mfi_tbolt_fw_state(struct mfi_softc *); +void mfi_tbolt_intr_dis(struct mfi_softc *); +void mfi_tbolt_intr_ena(struct mfi_softc *); +int mfi_tbolt_intr(struct mfi_softc *sc); +void mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *); + +static const struct mfi_iop_ops mfi_iop_tbolt = { + mfi_tbolt_fw_state, + mfi_tbolt_intr_dis, + mfi_tbolt_intr_ena, + mfi_tbolt_intr, + mfi_tbolt_post, + mfi_tbolt_scsi_ld_io, }; #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s)) @@ -186,6 +283,8 @@ mfi_get_ccb(struct mfi_softc *sc) splx(s); DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb); + if (__predict_false(ccb == NULL && sc->sc_running)) + aprint_error_dev(sc->sc_dev, "out of ccb\n"); return ccb; } @@ -211,7 +310,12 @@ mfi_put_ccb(struct mfi_ccb *ccb) ccb->ccb_sgl = NULL; ccb->ccb_data = NULL; ccb->ccb_len = 0; - + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + /* erase tb_request_desc but preserve SMID */ + int index = ccb->ccb_tb_request_desc.header.SMID; + ccb->ccb_tb_request_desc.words = 0; + ccb->ccb_tb_request_desc.header.SMID = index; + } s = splbio(); TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link); splx(s); @@ -223,12 +327,12 @@ mfi_destroy_ccb(struct mfi_softc *sc) struct mfi_ccb *ccb; uint32_t i; - DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc)); + DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc)); for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) { /* create a dma map for transfer */ - bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); + bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap); } if (i < sc->sc_max_cmds) @@ -245,11 +349,24 @@ mfi_init_ccb(struct mfi_softc *sc) struct mfi_ccb *ccb; uint32_t i; int error; + bus_addr_t io_req_base_phys; + uint8_t *io_req_base; + int offset; DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc)); sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds, M_DEVBUF, M_WAITOK|M_ZERO); + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + /* + * The first 256 bytes (SMID 0) is not used. + * Don't add to the cmd list. 
+ */ + io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) + + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE; + io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE; + } for (i = 0; i < sc->sc_max_cmds; i++) { ccb = &sc->sc_ccb[i]; @@ -270,14 +387,30 @@ mfi_init_ccb(struct mfi_softc *sc) (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i); /* create a dma map for transfer */ - error = bus_dmamap_create(sc->sc_dmat, + error = bus_dmamap_create(sc->sc_datadmat, MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap); if (error) { - printf("%s: cannot create ccb dmamap (%d)\n", - DEVNAME(sc), error); + aprint_error_dev(sc->sc_dev, + "cannot create ccb dmamap (%d)\n", error); goto destroy; } + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i; + ccb->ccb_tb_io_request = + (struct mfi_mpi2_request_raid_scsi_io *) + (io_req_base + offset); + ccb->ccb_tb_pio_request = + io_req_base_phys + offset; + offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i; + ccb->ccb_tb_sg_frame = + (mpi2_sge_io_union *)(sc->sc_reply_pool_limit + + offset); + ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr + + offset; + /* SMID 0 is reserved. Set SMID/index from 1 */ + ccb->ccb_tb_request_desc.header.SMID = i + 1; + } DNPRINTF(MFI_D_CCB, "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n", @@ -296,7 +429,7 @@ destroy: while (i) { i--; ccb = &sc->sc_ccb[i]; - bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); + bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap); } free(sc->sc_ccb, M_DEVBUF); @@ -413,17 +546,19 @@ mfi_transition_firmware(struct mfi_softc cur_state = fw_state; switch (fw_state) { case MFI_STATE_FAULT: - printf("%s: firmware fault\n", DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, "firmware fault\n"); return 1; case MFI_STATE_WAIT_HANDSHAKE: - if (sc->sc_flags & MFI_IOP_SKINNY) + if (sc->sc_ioptype == MFI_IOP_SKINNY || + sc->sc_ioptype == MFI_IOP_TBOLT) mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE); else mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE); max_wait = 2; break; case MFI_STATE_OPERATIONAL: - if (sc->sc_flags & MFI_IOP_SKINNY) + if (sc->sc_ioptype == MFI_IOP_SKINNY || + sc->sc_ioptype == MFI_IOP_TBOLT) mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY); else mfi_write(sc, MFI_IDB, MFI_INIT_READY); @@ -438,9 +573,16 @@ mfi_transition_firmware(struct mfi_softc case MFI_STATE_FLUSH_CACHE: max_wait = 20; break; + case MFI_STATE_BOOT_MESSAGE_PENDING: + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG); + max_wait = 180; + break; + } + /* FALLTHROUGH */ default: - printf("%s: unknown firmware state %d\n", - DEVNAME(sc), fw_state); + aprint_error_dev(sc->sc_dev, + "unknown firmware state %d\n", fw_state); return 1; } for (i = 0; i < (max_wait * 10); i++) { @@ -451,8 +593,8 @@ mfi_transition_firmware(struct mfi_softc break; } if (fw_state == cur_state) { - printf("%s: firmware stuck in state %#x\n", - DEVNAME(sc), fw_state); + aprint_error_dev(sc->sc_dev, + "firmware stuck in state %#x\n", fw_state); return 1; } } @@ -494,7 +636,8 @@ mfi_initialize_firmware(struct mfi_softc qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo); if (mfi_poll(ccb)) { - printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "mfi_initialize_firmware failed\n"); return 1; } @@ -512,7 +655,7 @@ mfi_get_info(struct mfi_softc *sc) DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc)); if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN, - 
sizeof(sc->sc_info), &sc->sc_info, NULL)) + sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false)) return 1; #ifdef MFI_DEBUG @@ -646,7 +789,7 @@ mfi_get_info(struct mfi_softc *sc) sc->sc_info.mci_host.mih_port_count); for (i = 0; i < 8; i++) - printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]); + printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]); printf("\n"); printf("%s: type %.x port_count %d port_addr ", @@ -654,14 +797,73 @@ mfi_get_info(struct mfi_softc *sc) sc->sc_info.mci_device.mid_type, sc->sc_info.mci_device.mid_port_count); - for (i = 0; i < 8; i++) - printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]); + for (i = 0; i < 8; i++) { + printf("%.0" PRIx64 " ", + sc->sc_info.mci_device.mid_port_addr[i]); + } printf("\n"); #endif /* MFI_DEBUG */ return 0; } +static int +mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat) +{ + DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc)); + + if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN, + sizeof(*stat), stat, NULL, cold ? true : false)) + return MFI_BBU_UNKNOWN; +#ifdef MFI_DEBUG + printf("bbu type %d, voltage %d, current %d, temperature %d, " + "status 0x%x\n", stat->battery_type, stat->voltage, stat->current, + stat->temperature, stat->fw_status); + printf("details: "); + switch(stat->battery_type) { + case MFI_BBU_TYPE_IBBU: + printf("guage %d relative charge %d charger state %d " + "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status, + stat->detail.ibbu.relative_charge , + stat->detail.ibbu.charger_system_state , + stat->detail.ibbu.charger_system_ctrl); + printf("\tcurrent %d abs charge %d max error %d\n", + stat->detail.ibbu.charging_current , + stat->detail.ibbu.absolute_charge , + stat->detail.ibbu.max_error); + break; + case MFI_BBU_TYPE_BBU: + printf("guage %d relative charge %d charger state %d\n", + stat->detail.ibbu.gas_guage_status, + stat->detail.bbu.relative_charge , + stat->detail.bbu.charger_status ); + printf("\trem capacity %d fyll capacity %d SOH %d\n", + stat->detail.bbu.remaining_capacity , + stat->detail.bbu.full_charge_capacity , + stat->detail.bbu.is_SOH_good); + default: + printf("\n"); + } +#endif + switch(stat->battery_type) { + case MFI_BBU_TYPE_BBU: + return (stat->detail.bbu.is_SOH_good ? 
+ MFI_BBU_GOOD : MFI_BBU_BAD); + case MFI_BBU_TYPE_NONE: + return MFI_BBU_UNKNOWN; + default: + if (stat->fw_status & + (MFI_BBU_STATE_PACK_MISSING | + MFI_BBU_STATE_VOLTAGE_LOW | + MFI_BBU_STATE_TEMPERATURE_HIGH | + MFI_BBU_STATE_LEARN_CYC_FAIL | + MFI_BBU_STATE_LEARN_CYC_TIMEOUT | + MFI_BBU_STATE_I2C_ERR_DETECT)) + return MFI_BBU_BAD; + return MFI_BBU_GOOD; + } +} + static void mfiminphys(struct buf *bp) { @@ -715,8 +917,15 @@ mfi_detach(struct mfi_softc *sc, int fla #endif /* NBIO > 0 */ mfi_intr_disable(sc); + mfi_shutdown(sc->sc_dev, 0); - /* TBD: shutdown firmware */ + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + workqueue_destroy(sc->sc_ldsync_wq); + mfi_put_ccb(sc->sc_ldsync_ccb); + mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool); + mfi_freemem(sc, &sc->sc_tbolt_ioc_init); + mfi_freemem(sc, &sc->sc_tbolt_verbuf); + } if ((error = mfi_destroy_ccb(sc)) != 0) return error; @@ -730,16 +939,63 @@ mfi_detach(struct mfi_softc *sc, int fla return 0; } +static bool +mfi_shutdown(device_t dev, int how) +{ + struct mfi_softc *sc = device_private(dev); + uint8_t mbox[MFI_MBOX_SIZE]; + int s = splbio(); + DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc)); + if (sc->sc_running) { + mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; + if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH, + MFI_DATA_NONE, 0, NULL, mbox, true)) { + aprint_error_dev(dev, "shutdown: cache flush failed\n"); + goto fail; + } + + mbox[0] = 0; + if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN, + MFI_DATA_NONE, 0, NULL, mbox, true)) { + aprint_error_dev(dev, "shutdown: " + "firmware shutdown failed\n"); + goto fail; + } + sc->sc_running = false; + } + splx(s); + return true; +fail: + splx(s); + return false; +} + +static bool +mfi_suspend(device_t dev, const pmf_qual_t *q) +{ + /* XXX to be implemented */ + return false; +} + +static bool +mfi_resume(device_t dev, const pmf_qual_t *q) +{ + /* XXX to be implemented */ + return false; +} + int mfi_attach(struct mfi_softc *sc, enum mfi_iop iop) { struct scsipi_adapter *adapt = &sc->sc_adapt; struct scsipi_channel *chan = &sc->sc_chan; - uint32_t status, frames; + uint32_t status, frames, max_sgl; int i; DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc)); + sc->sc_ioptype = iop; + switch (iop) { case MFI_IOP_XSCALE: sc->sc_iop = &mfi_iop_xscale; @@ -753,6 +1009,9 @@ mfi_attach(struct mfi_softc *sc, enum mf case MFI_IOP_SKINNY: sc->sc_iop = &mfi_iop_skinny; break; + case MFI_IOP_TBOLT: + sc->sc_iop = &mfi_iop_tbolt; + break; default: panic("%s: unknown iop %d", DEVNAME(sc), iop); } @@ -764,16 +1023,75 @@ mfi_attach(struct mfi_softc *sc, enum mf status = mfi_fw_state(sc); sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK; - sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16; + max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16; + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1); + sc->sc_sgl_size = sizeof(struct mfi_sg_ieee); + } else if (sc->sc_64bit_dma) { + sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1); + sc->sc_sgl_size = sizeof(struct mfi_sg64); + } else { + sc->sc_max_sgl = max_sgl; + sc->sc_sgl_size = sizeof(struct mfi_sg32); + } DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n", DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl); + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + uint32_t tb_mem_size; + /* for Alignment */ + tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; + + tb_mem_size += + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1); + sc->sc_reply_pool_size = + ((sc->sc_max_cmds + 1 + 15) 
/ 16) * 16; + tb_mem_size += + MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; + + /* this is for SGL's */ + tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds; + sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size); + if (sc->sc_tbolt_reqmsgpool == NULL) { + aprint_error_dev(sc->sc_dev, + "unable to allocate thunderbolt " + "request message pool\n"); + goto nopcq; + } + if (mfi_tbolt_init_desc_pool(sc)) { + aprint_error_dev(sc->sc_dev, + "Thunderbolt pool preparation error\n"); + goto nopcq; + } + + /* + * Allocate DMA memory mapping for MPI2 IOC Init descriptor, + * we are taking it diffrent from what we have allocated for + * Request and reply descriptors to avoid confusion later + */ + sc->sc_tbolt_ioc_init = mfi_allocmem(sc, + sizeof(struct mpi2_ioc_init_request)); + if (sc->sc_tbolt_ioc_init == NULL) { + aprint_error_dev(sc->sc_dev, + "unable to allocate thunderbolt IOC init memory"); + goto nopcq; + } + + sc->sc_tbolt_verbuf = mfi_allocmem(sc, + MEGASAS_MAX_NAME*sizeof(bus_addr_t)); + if (sc->sc_tbolt_verbuf == NULL) { + aprint_error_dev(sc->sc_dev, + "unable to allocate thunderbolt version buffer\n"); + goto nopcq; + } + + } /* consumer/producer and reply queue memory */ sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) + sizeof(struct mfi_prod_cons)); if (sc->sc_pcq == NULL) { - aprint_error("%s: unable to allocate reply queue memory\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "unable to allocate reply queue memory\n"); goto nopcq; } bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0, @@ -781,55 +1099,100 @@ mfi_attach(struct mfi_softc *sc, enum mf BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* frame memory */ - /* we are not doing 64 bit IO so only calculate # of 32 bit frames */ - frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl + - MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1; + frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) / + MFI_FRAME_SIZE + 1; sc->sc_frames_size = frames * MFI_FRAME_SIZE; sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds); if (sc->sc_frames == NULL) { - aprint_error("%s: unable to allocate frame memory\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "unable to allocate frame memory\n"); goto noframe; } /* XXX hack, fix this */ if (MFIMEM_DVA(sc->sc_frames) & 0x3f) { - aprint_error("%s: improper frame alignment (%#llx) FIXME\n", - DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames)); + aprint_error_dev(sc->sc_dev, + "improper frame alignment (%#llx) FIXME\n", + (long long int)MFIMEM_DVA(sc->sc_frames)); goto noframe; } /* sense memory */ sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE); if (sc->sc_sense == NULL) { - aprint_error("%s: unable to allocate sense memory\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "unable to allocate sense memory\n"); goto nosense; } /* now that we have all memory bits go initialize ccbs */ if (mfi_init_ccb(sc)) { - aprint_error("%s: could not init ccb list\n", DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, "could not init ccb list\n"); goto noinit; } /* kickstart firmware with all addresses and pointers */ - if (mfi_initialize_firmware(sc)) { - aprint_error("%s: could not initialize firmware\n", - DEVNAME(sc)); - goto noinit; + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + if (mfi_tbolt_init_MFI_queue(sc)) { + aprint_error_dev(sc->sc_dev, + "could not initialize firmware\n"); + goto noinit; + } + } else { + if (mfi_initialize_firmware(sc)) { + aprint_error_dev(sc->sc_dev, + "could not initialize firmware\n"); + goto 
noinit; + } } + sc->sc_running = true; if (mfi_get_info(sc)) { - aprint_error("%s: could not retrieve controller information\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "could not retrieve controller information\n"); goto noinit; } + aprint_normal_dev(sc->sc_dev, + "%s version %s\n", + sc->sc_info.mci_product_name, + sc->sc_info.mci_package_version); - aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n", - DEVNAME(sc), + + aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ", sc->sc_info.mci_lds_present, - sc->sc_info.mci_package_version, sc->sc_info.mci_memory_size); + sc->sc_bbuok = false; + if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) { + struct mfi_bbu_status bbu_stat; + int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat); + aprint_normal("BBU type "); + switch (bbu_stat.battery_type) { + case MFI_BBU_TYPE_BBU: + aprint_normal("BBU"); + break; + case MFI_BBU_TYPE_IBBU: + aprint_normal("IBBU"); + break; + default: + aprint_normal("unknown type %d", bbu_stat.battery_type); + } + aprint_normal(", status "); + switch(mfi_bbu_status) { + case MFI_BBU_GOOD: + aprint_normal("good\n"); + sc->sc_bbuok = true; + break; + case MFI_BBU_BAD: + aprint_normal("bad\n"); + break; + case MFI_BBU_UNKNOWN: + aprint_normal("unknown\n"); + break; + default: + panic("mfi_bbu_status"); + } + } else { + aprint_normal("BBU not present\n"); + } sc->sc_ld_cnt = sc->sc_info.mci_lds_present; sc->sc_max_ld = sc->sc_ld_cnt; @@ -839,8 +1202,9 @@ mfi_attach(struct mfi_softc *sc, enum mf memset(adapt, 0, sizeof(*adapt)); adapt->adapt_dev = sc->sc_dev; adapt->adapt_nchannels = 1; - if (sc->sc_ld_cnt) - adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt; + /* keep a few commands for management */ + if (sc->sc_max_cmds > 4) + adapt->adapt_openings = sc->sc_max_cmds - 4; else adapt->adapt_openings = sc->sc_max_cmds; adapt->adapt_max_periph = adapt->adapt_openings; @@ -849,7 +1213,7 @@ mfi_attach(struct mfi_softc *sc, enum mf memset(chan, 0, sizeof(*chan)); chan->chan_adapter = adapt; - chan->chan_bustype = &scsi_bustype; + chan->chan_bustype = &scsi_sas_bustype; chan->chan_channel = 0; chan->chan_flags = 0; chan->chan_nluns = 8; @@ -865,8 +1229,13 @@ mfi_attach(struct mfi_softc *sc, enum mf if (bio_register(sc->sc_dev, mfi_ioctl) != 0) panic("%s: controller registration failed", DEVNAME(sc)); if (mfi_create_sensors(sc) != 0) - aprint_error("%s: unable to create sensors\n", DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, "unable to create sensors\n"); #endif /* NBIO > 0 */ + if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume, + mfi_shutdown)) { + aprint_error_dev(sc->sc_dev, + "couldn't establish power handler\n"); + } return 0; noinit: @@ -876,6 +1245,12 @@ nosense: noframe: mfi_freemem(sc, &sc->sc_pcq); nopcq: + if (sc->sc_ioptype == MFI_IOP_TBOLT) { + if (sc->sc_tbolt_reqmsgpool) + mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool); + if (sc->sc_tbolt_verbuf) + mfi_freemem(sc, &sc->sc_tbolt_verbuf); + } return 1; } @@ -885,25 +1260,49 @@ mfi_poll(struct mfi_ccb *ccb) struct mfi_softc *sc = ccb->ccb_sc; struct mfi_frame_header *hdr; int to = 0; + int rv = 0; DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc)); hdr = &ccb->ccb_frame->mfr_header; hdr->mfh_cmd_status = 0xff; - hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + if (!sc->sc_MFA_enabled) + hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; - mfi_post(sc, ccb); - bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames), - ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames), - sc->sc_frames_size, BUS_DMASYNC_POSTREAD); + /* no 
callback, caller is supposed to do the cleanup */ + ccb->ccb_done = NULL; - while (hdr->mfh_cmd_status == 0xff) { - delay(1000); - if (to++ > 5000) /* XXX 5 seconds busywait sucks */ - break; + mfi_post(sc, ccb); + if (sc->sc_MFA_enabled) { + /* + * depending on the command type, result may be posted + * to *hdr, or not. In addition it seems there's + * no way to avoid posting the SMID to the reply queue. + * So pool using the interrupt routine. + */ + while (ccb->ccb_state != MFI_CCB_DONE) { + delay(1000); + if (to++ > 5000) { /* XXX 5 seconds busywait sucks */ + rv = 1; + break; + } + mfi_tbolt_intrh(sc); + } + } else { bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames), ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames), sc->sc_frames_size, BUS_DMASYNC_POSTREAD); + + while (hdr->mfh_cmd_status == 0xff) { + delay(1000); + if (to++ > 5000) { /* XXX 5 seconds busywait sucks */ + rv = 1; + break; + } + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames), + ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames), + sc->sc_frames_size, BUS_DMASYNC_POSTREAD); + } } bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames), ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames), @@ -912,16 +1311,16 @@ mfi_poll(struct mfi_ccb *ccb) if (ccb->ccb_data != NULL) { DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n", DEVNAME(sc)); - bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, ccb->ccb_dmamap->dm_mapsize, (ccb->ccb_direction & MFI_DATA_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); + bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap); } - if (hdr->mfh_cmd_status == 0xff) { - printf("%s: timeout on ccb %d\n", DEVNAME(sc), + if (rv != 0) { + aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n", hdr->mfh_context); ccb->ccb_flags |= MFI_CCB_F_ERR; return 1; @@ -961,8 +1360,9 @@ mfi_intr(void *arg) ctx = pcq->mpc_reply_q[consumer]; pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX; if (ctx == MFI_INVALID_CTX) - printf("%s: invalid context, p: %d c: %d\n", - DEVNAME(sc), producer, consumer); + aprint_error_dev(sc->sc_dev, + "invalid context, p: %d c: %d\n", + producer, consumer); else { /* XXX remove from queue and call scsi_done */ ccb = &sc->sc_ccb[ctx]; @@ -990,13 +1390,13 @@ mfi_intr(void *arg) } static int -mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno, +mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno, uint32_t blockcnt) { struct scsipi_periph *periph = xs->xs_periph; struct mfi_io_frame *io; - DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n", + DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n", device_xname(periph->periph_channel->chan_adapter->adapt_dev), periph->periph_target); @@ -1016,12 +1416,12 @@ mfi_scsi_io(struct mfi_ccb *ccb, struct io->mif_header.mfh_flags = 0; io->mif_header.mfh_sense_len = MFI_SENSE_SIZE; io->mif_header.mfh_data_len= blockcnt; - io->mif_lba_hi = 0; - io->mif_lba_lo = blockno; + io->mif_lba_hi = (blockno >> 32); + io->mif_lba_lo = (blockno & 0xffffffff); io->mif_sense_addr_lo = htole32(ccb->ccb_psense); io->mif_sense_addr_hi = 0; - ccb->ccb_done = mfi_scsi_xs_done; + ccb->ccb_done = mfi_scsi_ld_done; ccb->ccb_xs = xs; ccb->ccb_frame_size = MFI_IO_FRAME_SIZE; ccb->ccb_sgl = &io->mif_sgl; @@ -1036,11 +1436,17 @@ mfi_scsi_io(struct mfi_ccb *ccb, struct } static void -mfi_scsi_xs_done(struct mfi_ccb *ccb) +mfi_scsi_ld_done(struct mfi_ccb *ccb) +{ + struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; + mfi_scsi_xs_done(ccb, 
hdr->mfh_cmd_status, hdr->mfh_scsi_status); +} + +static void +mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status) { struct scsipi_xfer *xs = ccb->ccb_xs; struct mfi_softc *sc = ccb->ccb_sc; - struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n", DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame); @@ -1048,26 +1454,26 @@ mfi_scsi_xs_done(struct mfi_ccb *ccb) if (xs->data != NULL) { DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n", DEVNAME(sc)); - bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, ccb->ccb_dmamap->dm_mapsize, (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); + bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap); } - if (hdr->mfh_cmd_status != MFI_STAT_OK) { + if (status != MFI_STAT_OK) { xs->error = XS_DRIVER_STUFFUP; DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n", - DEVNAME(sc), hdr->mfh_cmd_status); + DEVNAME(sc), status); - if (hdr->mfh_scsi_status != 0) { + if (scsi_status != 0) { bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense), ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense), MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD); DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sense %#x %lx %lx\n", - DEVNAME(sc), hdr->mfh_scsi_status, + DEVNAME(sc), scsi_status, (u_long)&xs->sense, (u_long)ccb->ccb_sense); memset(&xs->sense, 0, sizeof(xs->sense)); memcpy(&xs->sense, ccb->ccb_sense, @@ -1109,7 +1515,7 @@ mfi_scsi_ld(struct mfi_ccb *ccb, struct memset(pf->mpf_cdb, 0, 16); memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen); - ccb->ccb_done = mfi_scsi_xs_done; + ccb->ccb_done = mfi_scsi_ld_done; ccb->ccb_xs = xs; ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE; ccb->ccb_sgl = &pf->mpf_sgl; @@ -1143,7 +1549,10 @@ mfi_scsipi_request(struct scsipi_channel struct mfi_ccb *ccb; struct scsi_rw_6 *rw; struct scsipi_rw_10 *rwb; - uint32_t blockno, blockcnt; + struct scsipi_rw_12 *rw12; + struct scsipi_rw_16 *rw16; + uint64_t blockno; + uint32_t blockcnt; uint8_t target; uint8_t mbox[MFI_MBOX_SIZE]; int s; @@ -1153,20 +1562,27 @@ mfi_scsipi_request(struct scsipi_channel /* Not supported. */ return; case ADAPTER_REQ_SET_XFER_MODE: - /* Not supported. 
*/ + { + struct scsipi_xfer_mode *xm = arg; + xm->xm_mode = PERIPH_CAP_TQING; + xm->xm_period = 0; + xm->xm_offset = 0; + scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm); return; + } case ADAPTER_REQ_RUN_XFER: break; } xs = arg; - DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n", - DEVNAME(sc), req, xs->cmd->opcode); - periph = xs->xs_periph; target = periph->periph_target; + DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x " + "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode, + periph->periph_target, periph->periph_lun); + s = splbio(); if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present || periph->periph_lun != 0) { @@ -1177,6 +1593,16 @@ mfi_scsipi_request(struct scsipi_channel splx(s); return; } + if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 || + xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) { + /* the cache is stable storage, don't flush */ + xs->error = XS_NOERROR; + xs->status = SCSI_OK; + xs->resid = 0; + scsipi_done(xs); + splx(s); + return; + } if ((ccb = mfi_get_ccb(sc)) == NULL) { DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc)); @@ -1188,13 +1614,32 @@ mfi_scsipi_request(struct scsipi_channel switch (xs->cmd->opcode) { /* IO path */ + case READ_16: + case WRITE_16: + rw16 = (struct scsipi_rw_16 *)xs->cmd; + blockno = _8btol(rw16->addr); + blockcnt = _4btol(rw16->length); + if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { + goto stuffup; + } + break; + + case READ_12: + case WRITE_12: + rw12 = (struct scsipi_rw_12 *)xs->cmd; + blockno = _4btol(rw12->addr); + blockcnt = _4btol(rw12->length); + if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { + goto stuffup; + } + break; + case READ_10: case WRITE_10: rwb = (struct scsipi_rw_10 *)xs->cmd; blockno = _4btol(rwb->addr); blockcnt = _2btol(rwb->length); - if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) { - mfi_put_ccb(ccb); + if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { goto stuffup; } break; @@ -1204,17 +1649,16 @@ mfi_scsipi_request(struct scsipi_channel rw = (struct scsi_rw_6 *)xs->cmd; blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff); blockcnt = rw->length ? rw->length : 0x100; - if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) { - mfi_put_ccb(ccb); + if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { goto stuffup; } break; case SCSI_SYNCHRONIZE_CACHE_10: + case SCSI_SYNCHRONIZE_CACHE_16: mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; if (mfi_mgmt(ccb, xs, MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) { - mfi_put_ccb(ccb); goto stuffup; } break; @@ -1229,7 +1673,6 @@ mfi_scsipi_request(struct scsipi_channel default: if (mfi_scsi_ld(ccb, xs)) { - mfi_put_ccb(ccb); goto stuffup; } break; @@ -1240,8 +1683,8 @@ mfi_scsipi_request(struct scsipi_channel if (xs->xs_control & XS_CTL_POLL) { if (mfi_poll(ccb)) { /* XXX check for sense in ccb->ccb_sense? 
*/ - printf("%s: mfi_scsipi_request poll failed\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, + "mfi_scsipi_request poll failed\n"); memset(&xs->sense, 0, sizeof(xs->sense)); xs->sense.scsi_sense.response_code = SSD_RCODE_VALID | SSD_RCODE_CURRENT; @@ -1272,6 +1715,7 @@ mfi_scsipi_request(struct scsipi_channel return; stuffup: + mfi_put_ccb(ccb); xs->error = XS_DRIVER_STUFFUP; scsipi_done(xs); splx(s); @@ -1292,14 +1736,17 @@ mfi_create_sgl(struct mfi_ccb *ccb, int if (!ccb->ccb_data) return 1; - error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap, + KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p()); + error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap, ccb->ccb_data, ccb->ccb_len, NULL, flags); if (error) { - if (error == EFBIG) - printf("more than %d dma segs\n", + if (error == EFBIG) { + aprint_error_dev(sc->sc_dev, "more than %d dma segs\n", sc->sc_max_sgl); - else - printf("error %d loading dma map\n", error); + } else { + aprint_error_dev(sc->sc_dev, + "error %d loading dma map\n", error); + } return 1; } @@ -1307,26 +1754,45 @@ mfi_create_sgl(struct mfi_ccb *ccb, int sgl = ccb->ccb_sgl; sgd = ccb->ccb_dmamap->dm_segs; for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) { - sgl->sg32[i].addr = htole32(sgd[i].ds_addr); - sgl->sg32[i].len = htole32(sgd[i].ds_len); - DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n", - DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len); + if (sc->sc_ioptype == MFI_IOP_TBOLT && + (hdr->mfh_cmd == MFI_CMD_PD_SCSI_IO || + hdr->mfh_cmd == MFI_CMD_LD_READ || + hdr->mfh_cmd == MFI_CMD_LD_WRITE)) { + sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr); + sgl->sg_ieee[i].len = htole32(sgd[i].ds_len); + sgl->sg_ieee[i].flags = 0; + DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#" + PRIx32 "\n", + DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len); + hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64; + } else if (sc->sc_64bit_dma) { + sgl->sg64[i].addr = htole64(sgd[i].ds_addr); + sgl->sg64[i].len = htole32(sgd[i].ds_len); + DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#" + PRIx32 "\n", + DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len); + hdr->mfh_flags |= MFI_FRAME_SGL64; + } else { + sgl->sg32[i].addr = htole32(sgd[i].ds_addr); + sgl->sg32[i].len = htole32(sgd[i].ds_len); + DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n", + DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len); + hdr->mfh_flags |= MFI_FRAME_SGL32; + } } if (ccb->ccb_direction == MFI_DATA_IN) { hdr->mfh_flags |= MFI_FRAME_DIR_READ; - bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); } else { hdr->mfh_flags |= MFI_FRAME_DIR_WRITE; - bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); } hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs; - /* for 64 bit io make the sizeof a variable to hold whatever sg size */ - ccb->ccb_frame_size += sizeof(struct mfi_sg32) * - ccb->ccb_dmamap->dm_nsegs; + ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs; ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE; DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d" @@ -1343,7 +1809,7 @@ mfi_create_sgl(struct mfi_ccb *ccb, int static int mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir, - uint32_t len, void *buf, uint8_t *mbox) + uint32_t len, void *buf, uint8_t *mbox, bool poll) { struct mfi_ccb *ccb; int rv = 1; @@ -1354,7 +1820,8 @@ 
mfi_mgmt_internal(struct mfi_softc *sc, if (rv) return rv; - if (cold) { + if (poll) { + rv = 1; if (mfi_poll(ccb)) goto done; } else { @@ -1425,12 +1892,12 @@ mfi_mgmt_done(struct mfi_ccb *ccb) if (ccb->ccb_data != NULL) { DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n", DEVNAME(sc)); - bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, ccb->ccb_dmamap->dm_mapsize, (ccb->ccb_direction & MFI_DATA_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); + bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap); } if (hdr->mfh_cmd_status != MFI_STAT_OK) @@ -1523,7 +1990,7 @@ mfi_ioctl_inq(struct mfi_softc *sc, stru /* get figures */ cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, - sizeof *cfg, cfg, NULL)) + sizeof *cfg, cfg, NULL, false)) goto freeme; strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); @@ -1546,7 +2013,7 @@ mfi_ioctl_vol(struct mfi_softc *sc, stru DEVNAME(sc), bv->bv_volid); if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN, - sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL)) + sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) goto done; i = bv->bv_volid; @@ -1555,7 +2022,7 @@ mfi_ioctl_vol(struct mfi_softc *sc, stru DEVNAME(sc), mbox[0]); if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, - sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox)) + sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false)) goto done; if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) { @@ -1647,7 +2114,7 @@ mfi_ioctl_disk(struct mfi_softc *sc, str /* send single element command to retrieve size for full structure */ cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, - sizeof *cfg, cfg, NULL)) + sizeof *cfg, cfg, NULL, false)) goto freeme; size = cfg->mfc_size; @@ -1656,7 +2123,7 @@ mfi_ioctl_disk(struct mfi_softc *sc, str /* memory for read config */ cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, - size, cfg, NULL)) + size, cfg, NULL, false)) goto freeme; ar = cfg->mfc_array; @@ -1721,7 +2188,7 @@ mfi_ioctl_disk(struct mfi_softc *sc, str *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id; memset(pd, 0, sizeof(*pd)); if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, - sizeof *pd, pd, mbox)) + sizeof *pd, pd, mbox, false)) goto freeme; bd->bd_size = pd->mpd_size * 512; /* bytes per block */ @@ -1780,7 +2247,7 @@ mfi_ioctl_alarm(struct mfi_softc *sc, st return EINVAL; } - if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL)) + if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false)) rv = EINVAL; else if (ba->ba_opcode == BIOC_GASTATUS) @@ -1809,7 +2276,7 @@ mfi_ioctl_blink(struct mfi_softc *sc, st pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK); if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN, - MFI_PD_LIST_SIZE, pd, NULL)) + MFI_PD_LIST_SIZE, pd, NULL, false)) goto done; for (i = 0, found = 0; i < pd->mpl_no_pd; i++) @@ -1843,7 +2310,7 @@ mfi_ioctl_blink(struct mfi_softc *sc, st } - if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox)) + if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false)) goto done; rv = 0; @@ -1866,7 +2333,7 @@ mfi_ioctl_setstate(struct mfi_softc *sc, pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK); if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN, - MFI_PD_LIST_SIZE, pd, NULL)) + MFI_PD_LIST_SIZE, pd, NULL, false)) 
goto done; for (i = 0, found = 0; i < pd->mpl_no_pd; i++) @@ -1911,7 +2378,7 @@ mfi_ioctl_setstate(struct mfi_softc *sc, if (mfi_mgmt_internal(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE, - 0, NULL, mbox)) + 0, NULL, mbox, false)) goto done; rv = 0; @@ -1944,7 +2411,7 @@ mfi_bio_hs(struct mfi_softc *sc, int vol /* send single element command to retrieve size for full structure */ cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, - sizeof *cfg, cfg, NULL)) + sizeof *cfg, cfg, NULL, false)) goto freeme; size = cfg->mfc_size; @@ -1953,7 +2420,7 @@ mfi_bio_hs(struct mfi_softc *sc, int vol /* memory for read config */ cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, - size, cfg, NULL)) + size, cfg, NULL, false)) goto freeme; /* calculate offset to hs structure */ @@ -1979,7 +2446,7 @@ mfi_bio_hs(struct mfi_softc *sc, int vol memset(mbox, 0, sizeof mbox); *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id; if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, - sizeof *pd, pd, mbox)) { + sizeof *pd, pd, mbox, false)) { DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n", DEVNAME(sc)); goto freeme; @@ -2034,19 +2501,30 @@ static int mfi_create_sensors(struct mfi_softc *sc) { int i; - int nsensors = sc->sc_ld_cnt; + int nsensors = sc->sc_ld_cnt + 1; int rv; sc->sc_sme = sysmon_envsys_create(); sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_sensor == NULL) { - aprint_error("%s: can't allocate envsys_data_t\n", - DEVNAME(sc)); + aprint_error_dev(sc->sc_dev, "can't allocate envsys_data_t\n"); return ENOMEM; } - for (i = 0; i < nsensors; i++) { + /* BBU */ + sc->sc_sensor[0].units = ENVSYS_INDICATOR; + sc->sc_sensor[0].state = ENVSYS_SINVALID; + sc->sc_sensor[0].value_cur = 0; + /* Enable monitoring for BBU state changes, if present */ + if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) + sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL; + snprintf(sc->sc_sensor[0].desc, + sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc)); + if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[0])) + goto out; + + for (i = 1; i < nsensors; i++) { sc->sc_sensor[i].units = ENVSYS_DRIVE; sc->sc_sensor[i].state = ENVSYS_SINVALID; sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY; @@ -2055,7 +2533,7 @@ mfi_create_sensors(struct mfi_softc *sc) /* logical drives */ snprintf(sc->sc_sensor[i].desc, sizeof(sc->sc_sensor[i].desc), "%s:%d", - DEVNAME(sc), i); + DEVNAME(sc), i - 1); if (sysmon_envsys_sensor_attach(sc->sc_sme, &sc->sc_sensor[i])) goto out; @@ -2066,8 +2544,8 @@ mfi_create_sensors(struct mfi_softc *sc) sc->sc_sme->sme_refresh = mfi_sensor_refresh; rv = sysmon_envsys_register(sc->sc_sme); if (rv != 0) { - aprint_error("%s: unable to register with sysmon (rv = %d)\n", - DEVNAME(sc), rv); + aprint_error_dev(sc->sc_dev, + "unable to register with sysmon (rv = %d)\n", rv); goto out; } return 0; @@ -2087,11 +2565,50 @@ mfi_sensor_refresh(struct sysmon_envsys int s; int error; - if (edata->sensor >= sc->sc_ld_cnt) + if (edata->sensor >= sc->sc_ld_cnt + 1) + return; + + if (edata->sensor == 0) { + /* BBU */ + struct mfi_bbu_status bbu_stat; + int bbu_status; + if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0) + return; + + KERNEL_LOCK(1, curlwp); + s = splbio(); + bbu_status = mfi_get_bbu(sc, &bbu_stat); + splx(s); + KERNEL_UNLOCK_ONE(curlwp); + switch(bbu_status) { + case MFI_BBU_GOOD: + edata->value_cur = 1; + edata->state = ENVSYS_SVALID; + if 
(!sc->sc_bbuok) + aprint_normal_dev(sc->sc_dev, + "BBU state changed to good\n"); + sc->sc_bbuok = true; + break; + case MFI_BBU_BAD: + edata->value_cur = 0; + edata->state = ENVSYS_SCRITICAL; + if (sc->sc_bbuok) + aprint_normal_dev(sc->sc_dev, + "BBU state changed to bad\n"); + sc->sc_bbuok = false; + break; + case MFI_BBU_UNKNOWN: + default: + edata->value_cur = 0; + edata->state = ENVSYS_SINVALID; + sc->sc_bbuok = false; + break; + } return; + } memset(&bv, 0, sizeof(bv)); - bv.bv_volid = edata->sensor; + bv.bv_volid = edata->sensor - 1; KERNEL_LOCK(1, curlwp); s = splbio(); error = mfi_ioctl_vol(sc, &bv); @@ -2171,6 +2688,7 @@ mfi_xscale_post(struct mfi_softc *sc, st mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) | ccb->ccb_extra_frames); + ccb->ccb_state = MFI_CCB_RUNNING; } static uint32_t @@ -2213,6 +2731,7 @@ mfi_ppc_post(struct mfi_softc *sc, struc { mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe | (ccb->ccb_extra_frames << 1)); + ccb->ccb_state = MFI_CCB_RUNNING; } u_int32_t @@ -2255,6 +2774,7 @@ mfi_gen2_post(struct mfi_softc *sc, stru { mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe | (ccb->ccb_extra_frames << 1)); + ccb->ccb_state = MFI_CCB_RUNNING; } u_int32_t @@ -2296,4 +2816,659 @@ mfi_skinny_post(struct mfi_softc *sc, st mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe | (ccb->ccb_extra_frames << 1)); mfi_write(sc, MFI_IQPH, 0x00000000); + ccb->ccb_state = MFI_CCB_RUNNING; +} + +#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008) + +void +mfi_tbolt_intr_ena(struct mfi_softc *sc) +{ + mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK); + mfi_read(sc, MFI_OMSK); +} + +void +mfi_tbolt_intr_dis(struct mfi_softc *sc) +{ + mfi_write(sc, MFI_OMSK, 0xFFFFFFFF); + mfi_read(sc, MFI_OMSK); +} + +int +mfi_tbolt_intr(struct mfi_softc *sc) +{ + int32_t status; + + status = mfi_read(sc, MFI_OSTS); + + if (ISSET(status, 0x1)) { + mfi_write(sc, MFI_OSTS, status); + mfi_read(sc, MFI_OSTS); + if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT)) + return 0; + return 1; + } + if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK)) + return 0; + mfi_read(sc, MFI_OSTS); + return 1; +} + +u_int32_t +mfi_tbolt_fw_state(struct mfi_softc *sc) +{ + return mfi_read(sc, MFI_OSP); +} + +void +mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb) +{ + if (sc->sc_MFA_enabled) { + if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0) + mfi_tbolt_build_mpt_ccb(ccb); + mfi_write(sc, MFI_IQPL, + ccb->ccb_tb_request_desc.words & 0xFFFFFFFF); + mfi_write(sc, MFI_IQPH, + ccb->ccb_tb_request_desc.words >> 32); + ccb->ccb_state = MFI_CCB_RUNNING; + return; + } + uint64_t bus_add = ccb->ccb_pframe; + bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA + << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + mfi_write(sc, MFI_IQPL, bus_add); + mfi_write(sc, MFI_IQPH, bus_add >> 32); + ccb->ccb_state = MFI_CCB_RUNNING; +} + +static void +mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb) +{ + union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc; + struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request; + struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain; + + io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST; + io_req->SGLOffset0 = + offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4; + io_req->ChainOffset = + offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16; + + mpi25_ieee_chain = + (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain; + mpi25_ieee_chain->Address = ccb->ccb_pframe; + + /* + In MFI pass thru, nextChainOffset will always be zero to + indicate the end of the chain. 
+ */ + mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT + | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; + + /* setting the length to the maximum length */ + mpi25_ieee_chain->Length = 1024; + + req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + ccb->ccb_flags |= MFI_CCB_F_TBOLT; + bus_dmamap_sync(ccb->ccb_sc->sc_dmat, + MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool), + ccb->ccb_tb_pio_request - + MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool), + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); +} + +/* + * Description: + * This function will prepare message pools for the Thunderbolt controller + */ +static int +mfi_tbolt_init_desc_pool(struct mfi_softc *sc) +{ + uint32_t offset = 0; + uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool); + + /* Request Decriptors alignement restrictions */ + KASSERT(((uintptr_t)addr & 0xFF) == 0); + + /* Skip request message pool */ + addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)]; + + /* Reply Frame Pool is initialized */ + sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr; + KASSERT(((uintptr_t)addr & 0xFF) == 0); + + offset = (uintptr_t)sc->sc_reply_frame_pool + - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool); + sc->sc_reply_frame_busaddr = + MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset; + + /* initializing reply address to 0xFFFFFFFF */ + memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF, + (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size)); + + /* Skip Reply Frame Pool */ + addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; + sc->sc_reply_pool_limit = (void *)addr; + + offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; + sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset; + + /* initialize the last_reply_idx to 0 */ + sc->sc_last_reply_idx = 0; + offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME * + sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool); + KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size); + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + return 0; +} + +/* + * This routine prepare and issue INIT2 frame to the Firmware + */ + +static int +mfi_tbolt_init_MFI_queue(struct mfi_softc *sc) +{ + struct mpi2_ioc_init_request *mpi2IocInit; + struct mfi_init_frame *mfi_init; + struct mfi_ccb *ccb; + bus_addr_t phyAddress; + mfi_address *mfiAddressTemp; + int s; + char *verbuf; + char wqbuf[10]; + + /* Check if initialization is already completed */ + if (sc->sc_MFA_enabled) { + return 1; + } + + mpi2IocInit = + (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init); + + s = splbio(); + if ((ccb = mfi_get_ccb(sc)) == NULL) { + splx(s); + return (EBUSY); + } + + + mfi_init = &ccb->ccb_frame->mfr_init; + + memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request)); + mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT; + mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER; + + /* set MsgVersion and HeaderVersion host driver was built with */ + mpi2IocInit->MsgVersion = MPI2_VERSION; + mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION; + mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4; + mpi2IocInit->ReplyDescriptorPostQueueDepth = + (uint16_t)sc->sc_reply_pool_size; + mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. 
*/ + + /* Get physical address of reply frame pool */ + phyAddress = sc->sc_reply_frame_busaddr; + mfiAddressTemp = + (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress; + mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; + mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); + + /* Get physical address of request message pool */ + phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool); + mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress; + mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; + mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); + + mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */ + mpi2IocInit->TimeStamp = time_uptime; + + verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf); + snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n", + MEGASAS_VERSION); + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0, + MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE); + mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf)); + mfi_init->driver_ver_hi = + htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32); + + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0, + MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize, + BUS_DMASYNC_PREWRITE); + /* Get the physical address of the mpi2 ioc init command */ + phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init); + mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress); + mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32); + + mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT; + mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request); + if (mfi_poll(ccb) != 0) { + aprint_error_dev(sc->sc_dev, "failed to send IOC init2 " + "command at 0x%" PRIx64 "\n", + (uint64_t)ccb->ccb_pframe); + splx(s); + return 1; + } + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0, + MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0, + MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize, + BUS_DMASYNC_POSTWRITE); + mfi_put_ccb(ccb); + splx(s); + + if (mfi_init->mif_header.mfh_cmd_status == 0) { + sc->sc_MFA_enabled = 1; + } + else { + aprint_error_dev(sc->sc_dev, "Init command Failed %x\n", + mfi_init->mif_header.mfh_cmd_status); + return 1; + } + + snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc)); + if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info, + sc, PRIBIO, IPL_BIO, 0) != 0) { + aprint_error_dev(sc->sc_dev, "workqueue_create failed\n"); + return 1; + } + workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL); + return 0; +} + +int +mfi_tbolt_intrh(void *arg) +{ + struct mfi_softc *sc = arg; + struct mfi_ccb *ccb; + union mfi_mpi2_reply_descriptor *desc; + int smid, num_completed; + + if (!mfi_tbolt_intr(sc)) + return 0; + + DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc), + (u_long)sc, (u_long)sc->sc_last_reply_idx); + + KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size); + + desc = (union mfi_mpi2_reply_descriptor *) + ((uintptr_t)sc->sc_reply_frame_pool + + sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE); + + bus_dmamap_sync(sc->sc_dmat, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1), + MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + num_completed = 0; + while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) != + 
MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + smid = desc->header.SMID; + KASSERT(smid > 0 && smid <= sc->sc_max_cmds); + ccb = &sc->sc_ccb[smid - 1]; + DNPRINTF(MFI_D_INTR, + "%s: mfi_tbolt_intr SMID %#x reply_idx %#x " + "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid, + sc->sc_last_reply_idx, desc->words, ccb); + KASSERT(ccb->ccb_state == MFI_CCB_RUNNING); + if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO && + ccb->ccb_tb_io_request->ChainOffset != 0) { + bus_dmamap_sync(sc->sc_dmat, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), + ccb->ccb_tb_psg_frame - + MFIMEM_DVA(sc->sc_tbolt_reqmsgpool), + MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD); + } + if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) { + bus_dmamap_sync(sc->sc_dmat, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), + ccb->ccb_tb_pio_request - + MFIMEM_DVA(sc->sc_tbolt_reqmsgpool), + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + } + if (ccb->ccb_done) + ccb->ccb_done(ccb); + else + ccb->ccb_state = MFI_CCB_DONE; + sc->sc_last_reply_idx++; + if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) { + sc->sc_last_reply_idx = 0; + } + desc->words = ~0x0; + /* Get the next reply descriptor */ + desc = (union mfi_mpi2_reply_descriptor *) + ((uintptr_t)sc->sc_reply_frame_pool + + sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE); + num_completed++; + } + if (num_completed == 0) + return 0; + + bus_dmamap_sync(sc->sc_dmat, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1), + MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx); + return 1; +} + + +int +mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, + uint64_t blockno, uint32_t blockcnt) +{ + struct scsipi_periph *periph = xs->xs_periph; + struct mfi_mpi2_request_raid_scsi_io *io_req; + int sge_count; + + DNPRINTF(MFI_D_CMD, "%s: mfi_tbolt_scsi_ld_io: %d\n", + device_xname(periph->periph_channel->chan_adapter->adapt_dev), + periph->periph_target); + + if (!xs->data) + return 1; + + ccb->ccb_done = mfi_tbolt_scsi_ld_done; + ccb->ccb_xs = xs; + ccb->ccb_data = xs->data; + ccb->ccb_len = xs->datalen; + + io_req = ccb->ccb_tb_io_request; + + /* Just the CDB length,rest of the Flags are zero */ + io_req->IoFlags = xs->cmdlen; + memset(io_req->CDB.CDB32, 0, 32); + memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen); + + io_req->RaidContext.TargetID = periph->periph_target; + io_req->RaidContext.Status = 0; + io_req->RaidContext.exStatus = 0; + io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT; + io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST; + io_req->DevHandle = periph->periph_target; + + ccb->ccb_tb_request_desc.header.RequestFlags = + (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + io_req->DataLength = blockcnt * MFI_SECTOR_LEN; + + if (xs->xs_control & XS_CTL_DATA_IN) { + io_req->Control = MPI2_SCSIIO_CONTROL_READ; + ccb->ccb_direction = MFI_DATA_IN; + } else { + io_req->Control = MPI2_SCSIIO_CONTROL_WRITE; + ccb->ccb_direction = MFI_DATA_OUT; + } + + sge_count = mfi_tbolt_create_sgl(ccb, + (xs->xs_control & XS_CTL_NOSLEEP) ? 
BUS_DMA_NOWAIT : BUS_DMA_WAITOK + ); + if (sge_count < 0) + return 1; + KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl); + io_req->RaidContext.numSGE = sge_count; + io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; + io_req->SGLOffset0 = + offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4; + + io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense); + io_req->SenseBufferLength = MFI_SENSE_SIZE; + + ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO; + bus_dmamap_sync(ccb->ccb_sc->sc_dmat, + MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool), + ccb->ccb_tb_pio_request - + MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool), + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + return 0; +} + + +static void +mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb) +{ + struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request; + mfi_scsi_xs_done(ccb, io_req->RaidContext.Status, + io_req->RaidContext.exStatus); +} + +static int +mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags) +{ + struct mfi_softc *sc = ccb->ccb_sc; + bus_dma_segment_t *sgd; + int error, i, sge_idx, sge_count; + struct mfi_mpi2_request_raid_scsi_io *io_req; + struct mpi25_ieee_sge_chain64 *sgl_ptr; + + DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc), + (u_long)ccb->ccb_data); + + if (!ccb->ccb_data) + return -1; + + KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p()); + error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap, + ccb->ccb_data, ccb->ccb_len, NULL, flags); + if (error) { + if (error == EFBIG) + aprint_error_dev(sc->sc_dev, "more than %d dma segs\n", + sc->sc_max_sgl); + else + aprint_error_dev(sc->sc_dev, + "error %d loading dma map\n", error); + return -1; + } + + io_req = ccb->ccb_tb_io_request; + sgl_ptr = &io_req->SGL.IeeeChain.Chain64; + sge_count = ccb->ccb_dmamap->dm_nsegs; + sgd = ccb->ccb_dmamap->dm_segs; + KASSERT(sge_count <= sc->sc_max_sgl); + KASSERT(sge_count <= + (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 + + MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG)); + + if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) { + /* One element to store the chain info */ + sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1; + DNPRINTF(MFI_D_DMA, + "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n", + sge_idx, sge_count, ccb->ccb_tb_pio_request); + } else { + sge_idx = sge_count; + } + + for (i = 0; i < sge_idx; i++) { + sgl_ptr->Address = htole64(sgd[i].ds_addr); + sgl_ptr->Length = htole32(sgd[i].ds_len); + sgl_ptr->Flags = 0; + if (sge_idx < sge_count) { + DNPRINTF(MFI_D_DMA, + "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32 + " flags 0x%x\n", sgl_ptr, i, + sgl_ptr->Address, sgl_ptr->Length, + sgl_ptr->Flags); + } + sgl_ptr++; + } + io_req->ChainOffset = 0; + if (sge_idx < sge_count) { + struct mpi25_ieee_sge_chain64 *sg_chain; + io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG; + sg_chain = sgl_ptr; + /* Prepare chain element */ + sg_chain->NextChainOffset = 0; + sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); + sg_chain->Length = (sizeof(mpi2_sge_io_union) * + (sge_count - sge_idx)); + sg_chain->Address = ccb->ccb_tb_psg_frame; + DNPRINTF(MFI_D_DMA, + "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32 + " flags 0x%x\n", sg_chain, sg_chain->Address, + sg_chain->Length, sg_chain->Flags); + sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64; + for (; i < sge_count; i++) { + sgl_ptr->Address = htole64(sgd[i].ds_addr); + sgl_ptr->Length = htole32(sgd[i].ds_len); + sgl_ptr->Flags = 0; + DNPRINTF(MFI_D_DMA, + 
"sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32 + " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address, + sgl_ptr->Length, sgl_ptr->Flags); + sgl_ptr++; + } + bus_dmamap_sync(sc->sc_dmat, + MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), + ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool), + MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD); + } + + if (ccb->ccb_direction == MFI_DATA_IN) { + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, + ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); + } else { + bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, + ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); + } + return sge_count; +} + +/* + * The ThunderBolt HW has an option for the driver to directly + * access the underlying disks and operate on the RAID. To + * do this there needs to be a capability to keep the RAID controller + * and driver in sync. The FreeBSD driver does not take advantage + * of this feature since it adds a lot of complexity and slows down + * performance. Performance is gained by using the controller's + * cache etc. + * + * Even though this driver doesn't access the disks directly, an + * AEN like command is used to inform the RAID firmware to "sync" + * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This + * command in write mode will return when the RAID firmware has + * detected a change to the RAID state. Examples of this type + * of change are removing a disk. Once the command returns then + * the driver needs to acknowledge this and "sync" all LD's again. + * This repeats until we shutdown. Then we need to cancel this + * pending command. + * + * If this is not done right the RAID firmware will not remove a + * pulled drive and the RAID won't go degraded etc. Effectively, + * stopping any RAID mangement to functions. + * + * Doing another LD sync, requires the use of an event since the + * driver needs to do a mfi_wait_command and can't do that in an + * interrupt thread. + * + * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO + * That requires a bunch of structure and it is simplier to just do + * the MFI_DCMD_LD_GET_LIST versus walking the RAID map. 
+ */ + +void +mfi_tbolt_sync_map_info(struct work *w, void *v) +{ + struct mfi_softc *sc = v; + int i; + struct mfi_ccb *ccb = NULL; + uint8_t mbox[MFI_MBOX_SIZE]; + struct mfi_ld *ld_sync = NULL; + size_t ld_size; + int s; + + DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc)); +again: + s = splbio(); + if (sc->sc_ldsync_ccb != NULL) { + splx(s); + return; + } + + if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN, + sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) { + aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n"); + goto err; + } + + ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld; + + ld_sync = (struct mfi_ld *) malloc(ld_size, M_DEVBUF, + M_WAITOK | M_ZERO); + if (ld_sync == NULL) { + aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n"); + goto err; + } + for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) { + ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld; + } + + if ((ccb = mfi_get_ccb(sc)) == NULL) { + aprint_error_dev(sc->sc_dev, "Failed to get sync command\n"); + free(ld_sync, M_DEVBUF); + goto err; + } + sc->sc_ldsync_ccb = ccb; + + memset(mbox, 0, MFI_MBOX_SIZE); + mbox[0] = sc->sc_ld_list.mll_no_ld; + mbox[1] = MFI_DCMD_MBOX_PEND_FLAG; + if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT, + ld_size, ld_sync, mbox)) { + aprint_error_dev(sc->sc_dev, "Failed to create sync command\n"); + goto err; + } + /* + * we won't sleep on this command, so we have to override + * the callback set up by mfi_mgmt() + */ + ccb->ccb_done = mfi_sync_map_complete; + + mfi_post(sc, ccb); + splx(s); + return; + +err: + if (ld_sync) + free(ld_sync, M_DEVBUF); + if (ccb) + mfi_put_ccb(ccb); + sc->sc_ldsync_ccb = NULL; + splx(s); + kpause("ldsyncp", 0, hz, NULL); + goto again; +} + +static void +mfi_sync_map_complete(struct mfi_ccb *ccb) +{ + struct mfi_softc *sc = ccb->ccb_sc; + bool aborted = !sc->sc_running; + + DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n", + DEVNAME(ccb->ccb_sc)); + KASSERT(sc->sc_ldsync_ccb == ccb); + mfi_mgmt_done(ccb); + free(ccb->ccb_data, M_DEVBUF); + if (ccb->ccb_flags & MFI_CCB_F_ERR) { + aprint_error_dev(sc->sc_dev, "sync command failed\n"); + aborted = true; + } + mfi_put_ccb(ccb); + sc->sc_ldsync_ccb = NULL; + + /* set it up again so the driver can catch more events */ + if (!aborted) { + workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL); + } }
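
A note for readers not used to the MPI2-style reply path added above: mfi_tbolt_intrh() walks a ring of reply descriptors, completing the ccb named by each descriptor's SMID until it reaches a slot still marked unused, wrapping sc_last_reply_idx at the end of the pool. The following user-space sketch models only that ring walk; RING_SIZE, DESC_UNUSED, struct reply_desc and drain_reply_ring() are simplified stand-ins invented for illustration, not the real MegaRAID definitions.

/*
 * Stand-alone sketch, not driver code: models the reply descriptor
 * ring walk.  All names and sizes here are made-up simplifications.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8	/* stand-in for sc_reply_pool_size */
#define DESC_UNUSED	0x0f	/* stand-in for MPI2_RPY_DESCRIPT_FLAGS_UNUSED */

struct reply_desc {
	uint8_t		flags;	/* descriptor type, DESC_UNUSED when free */
	uint16_t	smid;	/* 1-based command index, 0 is invalid */
};

static struct reply_desc ring[RING_SIZE];
static unsigned last_reply_idx;	/* stand-in for sc_last_reply_idx */

/* Consume every posted descriptor, mark it unused, wrap the index. */
static int
drain_reply_ring(void)
{
	int completed = 0;

	while (ring[last_reply_idx].flags != DESC_UNUSED) {
		printf("complete SMID %u at index %u\n",
		    (unsigned)ring[last_reply_idx].smid, last_reply_idx);
		ring[last_reply_idx].flags = DESC_UNUSED; /* like desc->words = ~0 */
		if (++last_reply_idx >= RING_SIZE)
			last_reply_idx = 0;	/* ring wrap */
		completed++;
	}
	return completed;
}

int
main(void)
{
	unsigned i;

	/* Mark the whole ring free, then post two fake completions. */
	for (i = 0; i < RING_SIZE; i++)
		ring[i].flags = DESC_UNUSED;
	ring[0].flags = 0x01; ring[0].smid = 3;
	ring[1].flags = 0x01; ring[1].smid = 7;

	printf("drained %d descriptors\n", drain_reply_ring());
	return 0;
}

In the driver, the equivalent of this loop is followed by mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx), which publishes the updated index to the controller.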
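
Likewise, the heart of mfi_tbolt_create_sgl() is deciding how many scatter/gather elements fit directly in the main I/O frame and how many must spill into the separate chain frame, with one main-frame slot reserved for the chain element itself. Here is a minimal sketch of just that split; MAX_MAIN_SGE and split_sgl() are illustrative placeholders standing in for MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG and the in-driver code.

/*
 * Stand-alone sketch, not driver code: the main-frame/chain-frame
 * SGE split.  MAX_MAIN_SGE and split_sgl() are invented names.
 */
#include <stdio.h>

#define MAX_MAIN_SGE	8	/* stand-in for MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG */

/*
 * Decide how many SGEs go in the main frame; the rest go in the
 * chain frame, and one main slot is kept for the chain element.
 */
static int
split_sgl(int nsegs, int *chain_cnt)
{
	int main_cnt;

	if (nsegs > MAX_MAIN_SGE) {
		main_cnt = MAX_MAIN_SGE - 1;
		*chain_cnt = nsegs - main_cnt;
	} else {
		main_cnt = nsegs;
		*chain_cnt = 0;
	}
	return main_cnt;
}

int
main(void)
{
	int nsegs, chain;

	for (nsegs = 1; nsegs <= 12; nsegs++) {
		int in_main = split_sgl(nsegs, &chain);
		printf("nsegs %2d -> main %d chain %d\n", nsegs, in_main, chain);
	}
	return 0;
}

This mirrors the sge_idx computation above: once dm_nsegs exceeds the main-message capacity, only capacity - 1 entries stay in the main frame and the remainder are written to the chain frame that is synced before the command is posted.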