Diffstat (limited to 'cvmx-csr-typedefs.h')
-rw-r--r--  cvmx-csr-typedefs.h  73991
1 file changed, 73991 insertions, 0 deletions
diff --git a/cvmx-csr-typedefs.h b/cvmx-csr-typedefs.h
new file mode 100644
index 000000000000..24d63867046a
--- /dev/null
+++ b/cvmx-csr-typedefs.h
@@ -0,0 +1,73991 @@
+/***********************license start***************
+ * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Octeon. Include cvmx-csr.h instead of this file directly.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+#ifndef __CVMX_CSR_TYPEDEFS_H__
+#define __CVMX_CSR_TYPEDEFS_H__
+
+
+/**
+ * cvmx_agl_gmx_bad_reg
+ *
+ * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
+ *
+ *
+ * Notes:
+ * OUT_OVR[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
+ * OUT_OVR[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
+ * LOSTSTAT, STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_bad_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_38_63 : 26;
+ uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
+ uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
+ uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
+ uint64_t txpsh : 1; /**< TX FIFO overflow */
+ uint64_t txpop : 1; /**< TX FIFO underflow */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_4_21 : 18;
+ uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 2;
+ uint64_t reserved_4_21 : 18;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t ovrflw1 : 1;
+ uint64_t txpop1 : 1;
+ uint64_t txpsh1 : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bad_reg_s cn52xx;
+ struct cvmx_agl_gmx_bad_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_bad_reg_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_35_63 : 29;
+ uint64_t txpsh : 1; /**< TX FIFO overflow */
+ uint64_t txpop : 1; /**< TX FIFO underflow */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_3_21 : 19;
+ uint64_t out_ovr : 1; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 1;
+ uint64_t reserved_3_21 : 19;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+} cvmx_agl_gmx_bad_reg_t;
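+
+/* Example usage: a minimal sketch of how these unions are typically consumed;
+ * it assumes the cvmx_read_csr() accessor and a CVMX_AGL_GMX_BAD_REG address
+ * macro provided elsewhere in the executive (e.g. cvmx.h / cvmx-csr.h).
+ *
+ * @verbatim
+ * static int agl_check_bad_reg(void)
+ * {
+ *     cvmx_agl_gmx_bad_reg_t bad;
+ *
+ *     bad.u64 = cvmx_read_csr(CVMX_AGL_GMX_BAD_REG);   // raw 64-bit read
+ *     if (bad.s.ovrflw || bad.s.txpop || bad.s.txpsh)  // decode MII0 fields
+ *         return -1;                                   // FIFO over/underflow
+ *     return 0;
+ * }
+ * @endverbatim
+ */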
+
+
+/**
+ * cvmx_agl_gmx_bist
+ *
+ * AGL_GMX_BIST = GMX BIST Results
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t status : 10; /**< BIST Results.
+                                                         HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.drf64x78m1_bist
+ - 1: gmx#.outb.fif.drf64x71m1_bist
+ - 2: gmx#.csr.gmi0.srf8x64m1_bist
+ - 3: 0
+ - 4: 0
+ - 5: 0
+ - 6: gmx#.csr.drf20x80m1_bist
+ - 7: gmx#.outb.stat.drf16x27m1_bist
+ - 8: gmx#.outb.stat.drf40x64m1_bist
+ - 9: 0 */
+#else
+ uint64_t status : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bist_s cn52xx;
+ struct cvmx_agl_gmx_bist_s cn52xxp1;
+ struct cvmx_agl_gmx_bist_s cn56xx;
+ struct cvmx_agl_gmx_bist_s cn56xxp1;
+} cvmx_agl_gmx_bist_t;
+
+
+/**
+ * cvmx_agl_gmx_drv_ctl
+ *
+ * AGL_GMX_DRV_CTL = GMX Drive Control
+ *
+ *
+ * Notes:
+ * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
+ * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_drv_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_49_63 : 15;
+ uint64_t byp_en1 : 1; /**< Compensation Controller Bypass Enable (MII1) */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pctl1 : 5; /**< AGL PCTL (MII1) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t nctl1 : 5; /**< AGL NCTL (MII1) */
+ uint64_t reserved_17_31 : 15;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t nctl1 : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t pctl1 : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t byp_en1 : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xx;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
+} cvmx_agl_gmx_drv_ctl_t;
+
+
+/**
+ * cvmx_agl_gmx_inf_mode
+ *
+ * AGL_GMX_INF_MODE = Interface Mode
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_inf_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1; /**< Interface Enable */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_inf_mode_s cn52xx;
+ struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
+ struct cvmx_agl_gmx_inf_mode_s cn56xx;
+ struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
+} cvmx_agl_gmx_inf_mode_t;
+
+
+/**
+ * cvmx_agl_gmx_prt#_cfg
+ *
+ * AGL_GMX_PRT_CFG = Port description
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_prtx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
+                                    MII traffic. When this bit is clear on a given
+                                    port, all MII cycles will appear as
+                                    inter-frame cycles. */
+ uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
+                                    MII traffic. When this bit is clear on a given
+                                    port, all MII cycles will appear as
+                                    inter-frame cycles. */
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bitimes (10/100Mbs operation)
+ 1 = Reserved */
+ uint64_t duplex : 1; /**< Duplex
+                                    0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbs operation
+ 1 = Reserved */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_prtx_cfg_s cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn56xxp1;
+} cvmx_agl_gmx_prtx_cfg_t;
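+
+/* Example usage: enabling a port is a read-modify-write through the union.
+ * A minimal sketch, assuming the cvmx_read_csr()/cvmx_write_csr() accessors
+ * and a CVMX_AGL_GMX_PRTX_CFG(port) address macro from the rest of the
+ * executive.
+ *
+ * @verbatim
+ * static void agl_port_enable(int port)
+ * {
+ *     cvmx_agl_gmx_prtx_cfg_t cfg;
+ *
+ *     cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ *     cfg.s.en = 1;       // link enable
+ *     cfg.s.tx_en = 1;    // allow transmit
+ *     cfg.s.rx_en = 1;    // allow receive
+ *     cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), cfg.u64);
+ * }
+ * @endverbatim
+ */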
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam0
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam0_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam1
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam1_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam2
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam2_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam3
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam3_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam4
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam4_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam5
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+                                    Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+                                    The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam5_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam_en
+ *
+ * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< CAM Entry Enables */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_cam_en_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_adr_ctl
+ *
+ * AGL_GMX_RX_ADR_CTL = Address Filtering Control
+ *
+ *
+ * Notes:
+ * * ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ *
+ * @verbatim
+ * bool dmac_addr_filter(uint8 prt, uint48 dmac) {
+ *   ASSERT(prt >= 0 && prt <= 3);
+ *   if (is_bcst(dmac))                                        // broadcast accept
+ *     return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
+ *   if (is_mcst(dmac) && AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1)  // multicast reject
+ *     return REJECT;
+ *   if (is_mcst(dmac) && AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2)  // multicast accept
+ *     return ACCEPT;
+ *
+ *   cam_hit = 0;
+ *
+ *   for (i = 0; i < 8; i++) {
+ *     if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
+ *       continue;
+ *     uint48 unswizzled_mac_adr = 0x0;
+ *     for (j = 5; j >= 0; j--) {
+ *       unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
+ *     }
+ *     if (unswizzled_mac_adr == dmac) {
+ *       cam_hit = 1;
+ *       break;
+ *     }
+ *   }
+ *
+ *   if (cam_hit)
+ *     return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
+ *   else
+ *     return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
+ * }
+ * @endverbatim
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
+ 0 = reject the packet on DMAC address match
+ 1 = accept the packet on DMAC address match */
+ uint64_t mcst : 2; /**< Multicast Mode
+ 0 = Use the Address Filter CAM
+ 1 = Force reject all multicast packets
+ 2 = Force accept all multicast packets
+ 3 = Reserved */
+ uint64_t bcst : 1; /**< Accept All Broadcast Packets */
+#else
+ uint64_t bcst : 1;
+ uint64_t mcst : 2;
+ uint64_t cam_mode : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+} cvmx_agl_gmx_rxx_adr_ctl_t;
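+
+/* Example usage: programming DMAC CAM entry <entry> with a 6-byte MAC is the
+ * inverse of the unswizzling loop in the pseudo code above. A minimal sketch,
+ * assuming cvmx_read_csr()/cvmx_write_csr() and CVMX_AGL_GMX_RXX_ADR_CAM0(port)
+ * .. CAM5(port) address macros.
+ *
+ * @verbatim
+ * static void agl_cam_set(int port, int entry, const uint8_t mac[6])
+ * {
+ *     const uint64_t cam_csr[6] = {
+ *         CVMX_AGL_GMX_RXX_ADR_CAM0(port), CVMX_AGL_GMX_RXX_ADR_CAM1(port),
+ *         CVMX_AGL_GMX_RXX_ADR_CAM2(port), CVMX_AGL_GMX_RXX_ADR_CAM3(port),
+ *         CVMX_AGL_GMX_RXX_ADR_CAM4(port), CVMX_AGL_GMX_RXX_ADR_CAM5(port),
+ *     };
+ *     int j;
+ *
+ *     // CAM5 holds the most significant DMAC byte; entry <i> owns bits
+ *     // <i*8+7 : i*8> of every CAM register. Writes are ignored while
+ *     // AGL_GMX_PRT_CFG[EN] is set (see the CAM register notes above).
+ *     for (j = 0; j < 6; j++) {
+ *         uint64_t v = cvmx_read_csr(cam_csr[j]);
+ *         v &= ~(0xffull << (entry * 8));            // clear the old byte
+ *         v |= (uint64_t)mac[5 - j] << (entry * 8);  // insert the new byte
+ *         cvmx_write_csr(cam_csr[j], v);
+ *     }
+ * }
+ * @endverbatim
+ */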
+
+
+/**
+ * cvmx_agl_gmx_rx#_decision
+ *
+ * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
+ *
+ *
+ * Notes:
+ * As each byte in a packet is received by GMX, the L2 byte count is compared
+ * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
+ *
+ * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ *                                             L2 Size
+ * Port Mode         <= AGL_GMX_RX_DECISION bytes      > AGL_GMX_RX_DECISION bytes
+ *                   (default=24)                      (default=24)
+ *
+ * MII/Full Duplex   accept packet                     apply filters
+ *                   (no filtering is applied)         (accept packet based on DMAC
+ *                                                      and PAUSE packet filters)
+ *
+ * MII/Half Duplex   drop packet                       apply filters
+ *                   (packet is unconditionally        (accept packet based on DMAC)
+ *                    dropped)
+ *
+ * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN]
+ *                        - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_decision_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
+ a packet. */
+#else
+ uint64_t cnt : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+} cvmx_agl_gmx_rxx_decision_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_frm_chk
+ *
+ * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
+ *
+ *
+ * Notes:
+ * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_chk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn56xxp1;
+} cvmx_agl_gmx_rxx_frm_chk_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_frm_ctl
+ *
+ * AGL_GMX_RX_FRM_CTL = Frame Control
+ *
+ *
+ * Notes:
+ * * PRE_CHK
+ * When set, the MII state expects a typical frame consisting of
+ * INTER_FRAME=>PREAMBLE(x7)=>SFD(x1)=>DAT. The state machine watches for
+ * this exact sequence in order to recognize a valid frame and push frame
+ * data into the Octane. There must be exactly 7 PREAMBLE cycles followed by
+ * the single SFD cycle for the frame to be accepted.
+ *
+ * When a problem does occur within the PREAMBLE sequence, the frame is
+ * marked as bad and not sent into the core. The AGL_GMX_RX_INT_REG[PCTERR]
+ * interrupt is also raised.
+ *
+ * * PRE_STRP
+ * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
+ * core as part of the packet.
+ *
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ *
+ * * CTL_BCK/CTL_DRP
+ * These bits control how the HW handles incoming PAUSE packets. Here are
+ * the most common modes of operation:
+ * CTL_BCK=1,CTL_DRP=1 - HW does it all
+ * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
+ * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
+ *
+ * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
+ * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
+ * would constitute an exception which should be handled by the processing
+ * cores. PAUSE packets should not be forwarded.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE,
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD
+ PRE_FREE must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_STRP must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xxp1;
+} cvmx_agl_gmx_rxx_frm_ctl_t;
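+
+/* Example usage: selecting the "HW does it all" PAUSE handling mode from the
+ * notes above (CTL_BCK=1, CTL_DRP=1). A minimal sketch, assuming a
+ * CVMX_AGL_GMX_RXX_FRM_CTL(port) address macro.
+ *
+ * @verbatim
+ * static void agl_rx_pause_in_hw(int port)
+ * {
+ *     cvmx_agl_gmx_rxx_frm_ctl_t frm;
+ *
+ *     frm.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port));
+ *     frm.s.ctl_bck = 1;   // forward pause info to the TX block
+ *     frm.s.ctl_drp = 1;   // drop the PAUSE frame itself
+ *     cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), frm.u64);
+ * }
+ * @endverbatim
+ */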
+
+
+/**
+ * cvmx_agl_gmx_rx#_frm_max
+ *
+ * AGL_GMX_RX_FRM_MAX = Frame Max length
+ *
+ *
+ * Notes:
+ * When changing the LEN field, be sure that LEN does not exceed
+ * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
+ * are within the maximum length parameter to be rejected because they exceed
+ * the AGL_GMX_RX_JABBER[CNT] limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_max_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Max-sized frame check
+ Failing packets set the MAXERR interrupt and are
+ optionally sent with opcode==MAXERR
+ LEN <= AGL_GMX_RX_JABBER[CNT] */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+} cvmx_agl_gmx_rxx_frm_max_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_frm_min
+ *
+ * AGL_GMX_RX_FRM_MIN = Frame Min length
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_min_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Min-sized frame check
+ Failing packets set the MINERR interrupt and are
+ optionally sent with opcode==MINERR */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+} cvmx_agl_gmx_rxx_frm_min_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_ifg
+ *
+ * AGL_GMX_RX_IFG = RX Min IFG
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< Min IFG between packets used to determine IFGERR */
+#else
+ uint64_t ifg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+} cvmx_agl_gmx_rxx_ifg_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_int_en
+ *
+ * AGL_GMX_RX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn56xxp1;
+} cvmx_agl_gmx_rxx_int_en_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_int_reg
+ *
+ * AGL_GMX_RX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * (1) exceptions will only be raised to the control processor if the
+ * corresponding bit in the AGL_GMX_RX_INT_EN register is set.
+ *
+ * (2) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (3) in half duplex operation, the expectation is that collisions will appear
+ * as MINERRs.
+ *
+ * (4) JABBER - An RX Jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMX will truncate the packet at the JABBER count.
+ *              Failure to do so could lead to system instability.
+ *
+ * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
+ * AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
+ * > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
+ *
+ * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
+ *
+ * (8) ALNERR - Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the received
+ * frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
+ *
+ * (A) LENERR - Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ *              and 1500 bytes. For untagged frames, the length must match
+ *              exactly. For tagged frames, the length or length+4 must match.
+ *
+ * (B) PCTERR - checks that the frame transitions from PREAMBLE=>SFD=>DATA.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR - Not to be included in the HRM
+ *
+ * OVRERR is an architectural assertion check internal to GMX to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ *
+ * GMX has an internal arbiter which selects which of 4 ports to
+ * buffer in the main RX FIFO. If we normally buffer 8 bytes,
+ * then each port will typically push a tick every 8 cycles - if
+ * the packet interface is going as fast as possible. If there
+ * are four ports, they push every two cycles. So that's the
+ * assumption. That the inbound module will always be able to
+ * consume the tick before another is produced. If that doesn't
+ * happen - that's when OVRERR will assert.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn56xxp1;
+} cvmx_agl_gmx_rxx_int_reg_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_jabber
+ *
+ * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
+ *
+ *
+ * Notes:
+ * CNT must be 8-byte aligned such that CNT[2:0] == 0
+ *
+ * The packet that will be sent to the packet input logic will have an
+ * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
+ * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
+ * defined as...
+ *
+ * max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
+ *
+ * Be sure the CNT field value is at least as large as the
+ * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
+ * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
+ * because they exceed the CNT limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_jabber_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Byte count for jabber check
+ Failing packets set the JABBER interrupt and are
+ optionally sent with opcode==JABBER
+ GMX will truncate the packet to CNT bytes
+ CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
+#else
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+} cvmx_agl_gmx_rxx_jabber_t;
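+
+/* Example usage: programming AGL_GMX_RX_FRM_MAX and AGL_GMX_RX_JABBER together
+ * so the constraints above hold (CNT 8-byte aligned and CNT >=
+ * AGL_GMX_RX_FRM_MAX[LEN]). A minimal sketch, assuming
+ * CVMX_AGL_GMX_RXX_FRM_MAX(port)/CVMX_AGL_GMX_RXX_JABBER(port) address macros.
+ *
+ * @verbatim
+ * static void agl_rx_set_max_frame(int port, uint16_t max_len)
+ * {
+ *     cvmx_agl_gmx_rxx_frm_max_t max;
+ *     cvmx_agl_gmx_rxx_jabber_t jab;
+ *
+ *     max.u64 = 0;
+ *     max.s.len = max_len;
+ *     jab.u64 = 0;
+ *     jab.s.cnt = (max_len + 7u) & ~7u;   // round up to 8B so CNT[2:0]==0
+ *                                         // and CNT >= FRM_MAX[LEN]
+ *     cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), max.u64);
+ *     cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), jab.u64);
+ * }
+ * @endverbatim
+ */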
+
+
+/**
+ * cvmx_agl_gmx_rx#_pause_drop_time
+ *
+ * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
+#else
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+} cvmx_agl_gmx_rxx_pause_drop_time_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_ctl
+ *
+ * AGL_GMX_RX_STATS_CTL = RX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_ctl_t;
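+
+/* Example usage: with RD_CLR set, each read of an RX stats register returns
+ * the count accumulated since the previous read. A minimal sketch, assuming
+ * a CVMX_AGL_GMX_RXX_STATS_CTL(port) address macro.
+ *
+ * @verbatim
+ * static void agl_rx_stats_clear_on_read(int port)
+ * {
+ *     cvmx_agl_gmx_rxx_stats_ctl_t ctl;
+ *
+ *     ctl.u64 = 0;
+ *     ctl.s.rd_clr = 1;   // RX stats registers clear on reads
+ *     cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), ctl.u64);
+ * }
+ * @endverbatim
+ */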
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received good packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_octs_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_ctl
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received pause packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_octs_ctl_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_dmac
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_octs_dmac_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_drp
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of dropped packets */
+#else
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_octs_drp_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts
+ *
+ * AGL_GMX_RX_STATS_PKTS
+ *
+ * Count of good received packets - packets that are not recognized as PAUSE
+ * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or
+ * have any other OPCODE (FCS, Length, etc).
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received good packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_pkts_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_bad
+ *
+ * AGL_GMX_RX_STATS_PKTS_BAD
+ *
+ * Count of all packets received with some error that were not dropped
+ * either due to the dmac filter or lack of room in the receive FIFO.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of bad packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_pkts_bad_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_ctl
+ *
+ * AGL_GMX_RX_STATS_PKTS_CTL
+ *
+ * Count of all packets received that were recognized as Flow Control or
+ * PAUSE packets. PAUSE packets with any kind of error are counted in
+ * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
+ * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
+ * increments regardless of whether the packet is dropped. Pause packets
+ * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due to the dmac
+ * filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received pause packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_dmac
+ *
+ * AGL_GMX_RX_STATS_PKTS_DMAC
+ *
+ * Count of all packets received that were dropped by the dmac filter.
+ * Packets that match the DMAC will be dropped and counted here regardless
+ * of whether they were bad packets. These packets will never be counted in
+ * AGL_GMX_RX_STATS_PKTS.
+ *
+ * Some packets that were not able to satisfy the DECISION_CNT may not
+ * actually be dropped by Octeon, but they will be counted here as if they
+ * were dropped.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of filtered dmac packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_drp
+ *
+ * AGL_GMX_RX_STATS_PKTS_DRP
+ *
+ * Count of all packets received that were dropped due to a full receive
+ * FIFO. This counts good and bad packets received - all packets dropped by
+ * the FIFO. It does not count packets dropped by the dmac or pause packet
+ * filters.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of dropped packets */
+#else
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+} cvmx_agl_gmx_rxx_stats_pkts_drp_t;
+
+
+/**
+ * cvmx_agl_gmx_rx#_udd_skp
+ *
+ * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
+ *
+ *
+ * Notes:
+ * (1) The skip bytes are part of the packet and will be sent down the NCB
+ * packet interface and will be handled by PKI.
+ *
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field - if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame - even
+ *     before UDD bytes. In most cases there will be no preamble,
+ *     since it will be MII-to-MII communication without a PHY
+ * involved.
+ *
+ * (4) We can still do address filtering and control packet filtering if the
+ * user desires.
+ *
+ * (5) UDD_SKP must be 0 in half-duplex operation unless
+ * AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
+ * then UDD_SKP will normally be 8.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ *
+ * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_udd_skp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
+ 0 = all skip bytes are included in FCS
+ 1 = the skip bytes are not included in FCS */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< Amount of User-defined data before the start of
+ the L2 data. Zero means L2 comes first.
+ Max value is 64. */
+#else
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t fcssel : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+} cvmx_agl_gmx_rxx_udd_skp_t;
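+
+/* Example usage: per note (5) above, LEN will normally be 8 when
+ * AGL_GMX_RX_FRM_CTL[PRE_CHK] is set. A minimal sketch, assuming a
+ * CVMX_AGL_GMX_RXX_UDD_SKP(port) address macro.
+ *
+ * @verbatim
+ * static void agl_rx_set_udd_skip(int port, int bytes, int include_in_fcs)
+ * {
+ *     cvmx_agl_gmx_rxx_udd_skp_t skp;
+ *
+ *     skp.u64 = 0;
+ *     skp.s.len = bytes;                     // 0 means L2 comes first; max 64
+ *     skp.s.fcssel = include_in_fcs ? 0 : 1; // 0 = skip bytes included in FCS
+ *     cvmx_write_csr(CVMX_AGL_GMX_RXX_UDD_SKP(port), skp.u64);
+ * }
+ * @endverbatim
+ */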
+
+
+/**
+ * cvmx_agl_gmx_rx_bp_drop#
+ *
+ * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_dropx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
+ When the FIFO exceeds this count, packets will
+ be dropped and not buffered.
+ MARK should typically be programmed to 2.
+ Failure to program correctly can lead to system
+ instability. */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+} cvmx_agl_gmx_rx_bp_dropx_t;
+
+
+/**
+ * cvmx_agl_gmx_rx_bp_off#
+ *
+ * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_offx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
+#else
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+} cvmx_agl_gmx_rx_bp_offx_t;
+
+
+/**
+ * cvmx_agl_gmx_rx_bp_on#
+ *
+ * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_onx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. */
+#else
+ uint64_t mark : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+} cvmx_agl_gmx_rx_bp_onx_t;
+
+
+/**
+ * cvmx_agl_gmx_rx_prt_info
+ *
+ * AGL_GMX_RX_PRT_INFO = state information for the ports
+ *
+ *
+ * Notes:
+ * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_prt_info_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t drop : 2; /**< Port indication that data was dropped */
+ uint64_t reserved_2_15 : 14;
+ uint64_t commit : 2; /**< Port indication that SOP was accepted */
+#else
+ uint64_t commit : 2;
+ uint64_t reserved_2_15 : 14;
+ uint64_t drop : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t drop : 1; /**< Port indication that data was dropped */
+ uint64_t reserved_1_15 : 15;
+ uint64_t commit : 1; /**< Port indication that SOP was accepted */
+#else
+ uint64_t commit : 1;
+ uint64_t reserved_1_15 : 15;
+ uint64_t drop : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
+} cvmx_agl_gmx_rx_prt_info_t;
+
+
+/**
+ * cvmx_agl_gmx_rx_tx_status
+ *
+ * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
+ *
+ *
+ * Notes:
+ * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_tx_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t tx : 2; /**< Transmit data since last read */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rx : 2; /**< Receive data since last read */
+#else
+ uint64_t rx : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t tx : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t tx : 1; /**< Transmit data since last read */
+ uint64_t reserved_1_3 : 3;
+ uint64_t rx : 1; /**< Receive data since last read */
+#else
+ uint64_t rx : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t tx : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
+} cvmx_agl_gmx_rx_tx_status_t;
+
+
+/**
+ * cvmx_agl_gmx_smac#
+ *
+ * AGL_GMX_SMAC = MII SMAC
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_smacx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t smac : 48; /**< The SMAC field is used for generating and
+ accepting Control Pause packets */
+#else
+ uint64_t smac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_smacx_s cn52xx;
+ struct cvmx_agl_gmx_smacx_s cn52xxp1;
+ struct cvmx_agl_gmx_smacx_s cn56xx;
+ struct cvmx_agl_gmx_smacx_s cn56xxp1;
+} cvmx_agl_gmx_smacx_t;
+
+
+/**
+ * cvmx_agl_gmx_stat_bp
+ *
+ * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_stat_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t bp : 1; /**< Current BP state */
+ uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
+ Saturating counter */
+#else
+ uint64_t cnt : 16;
+ uint64_t bp : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_agl_gmx_stat_bp_s cn52xx;
+ struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn56xx;
+ struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+} cvmx_agl_gmx_stat_bp_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_append
+ *
+ * AGL_GMX_TX_APPEND = MII TX Append Control
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_append_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
+ when FCS is clear. Pause packets are normally
+ padded to 60 bytes. If
+ AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
+ FORCE_FCS will not be used. */
+ uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
+ uint64_t pad : 1; /**< Append PAD bytes so frames meet the min size */
+ uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */
+#else
+ uint64_t preamble : 1;
+ uint64_t pad : 1;
+ uint64_t fcs : 1;
+ uint64_t force_fcs : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_append_s cn52xx;
+ struct cvmx_agl_gmx_txx_append_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn56xx;
+ struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+} cvmx_agl_gmx_txx_append_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_ctl
+ *
+ * AGL_GMX_TX_CTL = TX Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
+ and interrupts */
+ uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
+ and interrupts */
+#else
+ uint64_t xscol_en : 1;
+ uint64_t xsdef_en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+} cvmx_agl_gmx_txx_ctl_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_min_pkt
+ *
+ * AGL_GMX_TX_MIN_PKT = MII TX Min Size Packet (pad up to min size)
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_min_pkt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
+                                    Padding is only appended when
+                                    AGL_GMX_TX_APPEND[PAD] for the corresponding MII
+                                    port is set. Packets will be padded to
+                                    MIN_SIZE+1. The reset value pads to 60 bytes. */
+#else
+ uint64_t min_size : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+} cvmx_agl_gmx_txx_min_pkt_t;
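+
+/* Illustrative usage sketch, not part of the auto-generated definitions.
+ * Pads frames to the standard 60 bytes before FCS (MIN_SIZE+1 == 60).
+ * Assumes the cvmx_write_csr() helper and the
+ * CVMX_AGL_GMX_TXX_MIN_PKT(port) address macro from the companion headers.
+ */
+static inline void cvmx_example_agl_tx_min_pkt(int port)
+{
+    cvmx_agl_gmx_txx_min_pkt_t min_pkt;
+    min_pkt.u64 = 0;
+    min_pkt.s.min_size = 59; /* frames are padded to MIN_SIZE+1 = 60 bytes */
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_MIN_PKT(port), min_pkt.u64);
+}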
+
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_interval
+ *
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL = MII TX Pause Packet transmission interval - how often PAUSE packets will be sent
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
+ bit-times.
+ Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
+ INTERVAL=0 will only send a single PAUSE packet
+ for each backpressure event */
+#else
+ uint64_t interval : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+} cvmx_agl_gmx_txx_pause_pkt_interval_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_time
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TIME = MII TX Pause Packet pause_time field
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< The pause_time field placed in outbnd pause pkts
+ pause_time is in 512 bit-times
+ Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+} cvmx_agl_gmx_txx_pause_pkt_time_t;
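+
+/* Illustrative sketch of the INTERVAL <= TIME - (largest_pkt_size + IFG +
+ * pause_pkt_size) rule from the notes above, not part of the generated
+ * definitions. Both fields count 512 bit-time units, so the byte sizes are
+ * converted first. Assumes the cvmx_write_csr() helper and the
+ * CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME/INTERVAL(port) address macros.
+ */
+static inline void cvmx_example_agl_tx_pause_timing(int port, uint16_t time)
+{
+    cvmx_agl_gmx_txx_pause_pkt_time_t t;
+    cvmx_agl_gmx_txx_pause_pkt_interval_t i;
+    /* 1518B frame + 12B IFG + 64B pause packet, in 512 bit-time units */
+    const uint32_t burst_units = ((1518 + 12 + 64) * 8 + 511) / 512; /* 25 */
+    t.u64 = 0;
+    t.s.time = time;
+    i.u64 = 0;
+    i.s.interval = (uint16_t)(time - burst_units);
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(port), t.u64);
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(port), i.u64);
+}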
+
+
+/**
+ * cvmx_agl_gmx_tx#_pause_togo
+ *
+ * AGL_GMX_TX_PAUSE_TOGO = MII TX Amount of time remaining to backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_togo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Amount of time remaining to backpressure */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+} cvmx_agl_gmx_txx_pause_togo_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_pause_zero
+ *
+ * AGL_GMX_TX_PAUSE_ZERO = MII TX Pause Zero - send a zero-time PAUSE packet when backpressure clears
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_zero_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< When the backpressure condition clears, send a
+ PAUSE packet with pause_time of zero to
+ re-enable the channel */
+#else
+ uint64_t send : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+} cvmx_agl_gmx_txx_pause_zero_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_soft_pause
+ *
+ * AGL_GMX_TX_SOFT_PAUSE = MII TX Software Pause
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_soft_pause_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times
+ for full-duplex operation only */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+} cvmx_agl_gmx_txx_soft_pause_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat0
+ *
+ * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive deferral */
+ uint64_t xscol : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive collision. Defined by
+ AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t xscol : 32;
+ uint64_t xsdef : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat0_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat1
+ *
+ * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t scol : 32; /**< Number of packets sent with a single collision */
+ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
+ but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t mcol : 32;
+ uint64_t scol : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat1_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat2
+ *
+ * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
+ *
+ *
+ * Notes:
+ * - Octet counts are the sum of all data transmitted on the wire including
+ * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet
+ * counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of total octets sent on the interface.
+ Does not count octets from frames that were
+ truncated due to collisions in halfdup mode. */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat2_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat3
+ *
+ * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of total frames sent on the interface.
+ Does not count frames that were truncated due to
+ collisions in halfdup mode. */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat3_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat4
+ *
+ * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
+ uint64_t hist0 : 32; /**< Number of packets sent with an octet count
+ of < 64. */
+#else
+ uint64_t hist0 : 32;
+ uint64_t hist1 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat4_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat5
+ *
+ * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
+ 128 - 255. */
+ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
+ 65 - 127. */
+#else
+ uint64_t hist2 : 32;
+ uint64_t hist3 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat5_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat6
+ *
+ * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
+ 512 - 1023. */
+ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
+ 256 - 511. */
+#else
+ uint64_t hist4 : 32;
+ uint64_t hist5 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat6_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat7
+ *
+ * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (>1518) / AGL_GMX_TX_STATS_HIST6 (1024-1518)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist7 : 32; /**< Number of packets sent with an octet count
+ of > 1518. */
+ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
+ 1024 - 1518. */
+#else
+ uint64_t hist6 : 32;
+ uint64_t hist7 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat7_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat8
+ *
+ * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
+ * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
+ * as per the 802.3 frame definition. If the system requires additional data
+ * before the L2 header, then the MCST and BCST counters may not reflect
+ * reality and should be ignored by software.
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat8_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
+ Does not include BCST packets. */
+ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
+ Does not include MCST packets. */
+#else
+ uint64_t bcst : 32;
+ uint64_t mcst : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat8_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stat9
+ *
+ * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat9_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t undflw : 32; /**< Number of underflow packets */
+ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
+ generated by GMX. It does not include control
+ packets forwarded or generated by the PP's. */
+#else
+ uint64_t ctl : 32;
+ uint64_t undflw : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+} cvmx_agl_gmx_txx_stat9_t;
+
+
+/**
+ * cvmx_agl_gmx_tx#_stats_ctl
+ *
+ * AGL_GMX_TX_STATS_CTL = TX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stats_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+} cvmx_agl_gmx_txx_stats_ctl_t;
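+
+/* Illustrative sketch, not part of the generated definitions: with RD_CLR
+ * set, a single read returns the XSDEF/XSCOL pair and zeroes the counters.
+ * Assumes the cvmx_read_csr()/cvmx_write_csr() helpers and the
+ * CVMX_AGL_GMX_TXX_STATS_CTL/STAT0(port) address macros.
+ */
+static inline uint64_t cvmx_example_agl_tx_read_stat0(int port)
+{
+    cvmx_agl_gmx_txx_stats_ctl_t ctl;
+    ctl.u64 = 0;
+    ctl.s.rd_clr = 1; /* subsequent stats reads clear on read */
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), ctl.u64);
+    return cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
+}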
+
+
+/**
+ * cvmx_agl_gmx_tx#_thresh
+ *
+ * AGL_GMX_TX_THRESH = MII TX Threshold
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_thresh_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the MII interface
+ This register should be large enough to prevent
+ underflow on the MII interface and must never
+ be set below 4. This register cannot exceed
+ the TX FIFO depth, which is 32 16B entries. */
+#else
+ uint64_t cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+} cvmx_agl_gmx_txx_thresh_t;
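+
+/* Illustrative sketch, not part of the generated definitions: clamp CNT to
+ * the legal range from the note above (never below 4, never above the
+ * 32-entry TX FIFO). Assumes the cvmx_write_csr() helper and the
+ * CVMX_AGL_GMX_TXX_THRESH(port) address macro.
+ */
+static inline void cvmx_example_agl_tx_thresh(int port, unsigned int cnt)
+{
+    cvmx_agl_gmx_txx_thresh_t thresh;
+    if (cnt < 4)
+        cnt = 4;  /* must never be set below 4 */
+    if (cnt > 32)
+        cnt = 32; /* cannot exceed the TX FIFO depth */
+    thresh.u64 = 0;
+    thresh.s.cnt = cnt;
+    cvmx_write_csr(CVMX_AGL_GMX_TXX_THRESH(port), thresh.u64);
+}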
+
+
+/**
+ * cvmx_agl_gmx_tx_bp
+ *
+ * AGL_GMX_TX_BP = MII TX BackPressure Register
+ *
+ *
+ * Notes:
+ * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t bp : 2; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_bp_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t bp : 1; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+} cvmx_agl_gmx_tx_bp_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_col_attempt
+ *
+ * AGL_GMX_TX_COL_ATTEMPT = MII TX collision attempts before dropping frame
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_col_attempt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t limit : 5; /**< Collision Attempts */
+#else
+ uint64_t limit : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+} cvmx_agl_gmx_tx_col_attempt_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_ifg
+ *
+ * Common
+ *
+ *
+ * AGL_GMX_TX_IFG = MII TX Interframe Gap
+ *
+ * Notes:
+ * * Programming IFG1 and IFG2.
+ *
+ * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-8, IFG2 must be in the range of 4-12, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-11, IFG2 must be in the range of 1-11, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For all other systems, IFG1 and IFG2 can be any value in the range of
+ * 1-15, allowing for a total possible IFG sum of 2-30.
+ *
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
+ If CRS is detected during IFG2, then the
+ interFrameSpacing timer is not reset and a frame
+ is transmitted once the timer expires. */
+ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
+ If CRS is detected during IFG1, then the
+ interFrameSpacing timer is reset and a frame is
+ not transmitted. */
+#else
+ uint64_t ifg1 : 4;
+ uint64_t ifg2 : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+} cvmx_agl_gmx_tx_ifg_t;
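+
+/* Illustrative sketch, not part of the generated definitions: an IEEE
+ * 802.3-compatible split with IFG1+IFG2 == 12 (IFG1=8, IFG2=4 satisfies
+ * both the half- and full-duplex ranges above). Assumes the
+ * cvmx_write_csr() helper and the CVMX_AGL_GMX_TX_IFG address macro.
+ */
+static inline void cvmx_example_agl_tx_ifg_8023(void)
+{
+    cvmx_agl_gmx_tx_ifg_t ifg;
+    ifg.u64 = 0;
+    ifg.s.ifg1 = 8; /* 2/3 of the interframe gap */
+    ifg.s.ifg2 = 4; /* 1/3 of the interframe gap */
+    cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, ifg.u64);
+}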
+
+
+/**
+ * cvmx_agl_gmx_tx_int_en
+ *
+ * AGL_GMX_TX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_en_s cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+} cvmx_agl_gmx_tx_int_en_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_int_reg
+ *
+ * AGL_GMX_TX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_reg_s cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+} cvmx_agl_gmx_tx_int_reg_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_jam
+ *
+ * AGL_GMX_TX_JAM = MII TX Jam Pattern
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_jam_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t jam : 8; /**< Jam pattern */
+#else
+ uint64_t jam : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_jam_s cn52xx;
+ struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn56xx;
+ struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+} cvmx_agl_gmx_tx_jam_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_lfsr
+ *
+ * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_lfsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
+ numbers to compute truncated binary exponential
+ backoff. */
+#else
+ uint64_t lfsr : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+} cvmx_agl_gmx_tx_lfsr_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_ovr_bp
+ *
+ * AGL_GMX_TX_OVR_BP = MII TX Override BackPressure
+ *
+ *
+ * Notes:
+ * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ovr_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t en : 2; /**< Per port Enable back pressure override */
+ uint64_t reserved_6_7 : 2;
+ uint64_t bp : 2; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t bp : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t en : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t en : 1; /**< Per port Enable back pressure override */
+ uint64_t reserved_5_7 : 3;
+ uint64_t bp : 1; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_1_3 : 3;
+ uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t bp : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t en : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+} cvmx_agl_gmx_tx_ovr_bp_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_dmac
+ *
+ * AGL_GMX_TX_PAUSE_PKT_DMAC = MII TX Pause Packet DMAC field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< The DMAC field placed in outbnd pause pkts */
+#else
+ uint64_t dmac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+} cvmx_agl_gmx_tx_pause_pkt_dmac_t;
+
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_type
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TYPE = MII TX Pause Packet TYPE field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t type : 16; /**< The TYPE field placed in outbnd pause pkts */
+#else
+ uint64_t type : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+} cvmx_agl_gmx_tx_pause_pkt_type_t;
+
+
+/**
+ * cvmx_asx#_gmii_rx_clk_set
+ *
+ * ASX_GMII_RX_CLK_SET = GMII Clock delay setting
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_clk_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXCLK (GMII receive clk)
+ delay line. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
+} cvmx_asxx_gmii_rx_clk_set_t;
+
+
+/**
+ * cvmx_asx#_gmii_rx_dat_set
+ *
+ * ASX_GMII_RX_DAT_SET = GMII Data delay setting
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_dat_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (GMII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
+} cvmx_asxx_gmii_rx_dat_set_t;
+
+
+/**
+ * cvmx_asx#_int_en
+ *
+ * ASX_INT_EN = Interrupt Enable
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t txpsh : 4; /**< TX FIFO overflow on RGMII port */
+ uint64_t txpop : 4; /**< TX FIFO underflow on RGMII port */
+ uint64_t ovrflw : 4; /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t txpsh : 3; /**< TX FIFO overflow on RGMII port */
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpop : 3; /**< TX FIFO underflow on RGMII port */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ovrflw : 3; /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_en_cn30xx cn31xx;
+ struct cvmx_asxx_int_en_s cn38xx;
+ struct cvmx_asxx_int_en_s cn38xxp2;
+ struct cvmx_asxx_int_en_cn30xx cn50xx;
+ struct cvmx_asxx_int_en_s cn58xx;
+ struct cvmx_asxx_int_en_s cn58xxp1;
+} cvmx_asxx_int_en_t;
+
+
+/**
+ * cvmx_asx#_int_reg
+ *
+ * ASX_INT_REG = Interrupt Register
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t txpsh : 4; /**< TX FIFO overflow on RGMII port */
+ uint64_t txpop : 4; /**< TX FIFO underflow on RGMII port */
+ uint64_t ovrflw : 4; /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_reg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t txpsh : 3; /**< TX FIFO overflow on RGMII port */
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpop : 3; /**< TX FIFO underflow on RGMII port */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ovrflw : 3; /**< RX FIFO overflow on RGMII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_reg_cn30xx cn31xx;
+ struct cvmx_asxx_int_reg_s cn38xx;
+ struct cvmx_asxx_int_reg_s cn38xxp2;
+ struct cvmx_asxx_int_reg_cn30xx cn50xx;
+ struct cvmx_asxx_int_reg_s cn58xx;
+ struct cvmx_asxx_int_reg_s cn58xxp1;
+} cvmx_asxx_int_reg_t;
+
+
+/**
+ * cvmx_asx#_mii_rx_dat_set
+ *
+ * ASX_MII_RX_DAT_SET = MII Data delay setting
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_mii_rx_dat_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (MII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
+} cvmx_asxx_mii_rx_dat_set_t;
+
+
+/**
+ * cvmx_asx#_prt_loop
+ *
+ * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins)
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_prt_loop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t ext_loop : 4; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RGMII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t int_loop : 4; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RGMII pins)
+ 1 = TX FIFO drives the RX FIFO
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 4;
+ uint64_t ext_loop : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_prt_loop_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_7_63 : 57;
+ uint64_t ext_loop : 3; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RGMII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t reserved_3_3 : 1;
+ uint64_t int_loop : 3; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RGMII pins)
+ 1 = TX FIFO drives the RX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - GMX_TX_CLK[CLK_CNT] must be 1
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t ext_loop : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_prt_loop_cn30xx cn31xx;
+ struct cvmx_asxx_prt_loop_s cn38xx;
+ struct cvmx_asxx_prt_loop_s cn38xxp2;
+ struct cvmx_asxx_prt_loop_cn30xx cn50xx;
+ struct cvmx_asxx_prt_loop_s cn58xx;
+ struct cvmx_asxx_prt_loop_s cn58xxp1;
+} cvmx_asxx_prt_loop_t;
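+
+/* Illustrative sketch, not part of the generated definitions: turn on
+ * internal loopback for one port so the TX FIFO drives the RX FIFO (the
+ * RGMII link status is unused in this mode). Assumes the
+ * cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_ASXX_PRT_LOOP(block)
+ * address macro.
+ */
+static inline void cvmx_example_asx_int_loopback(int block, int port)
+{
+    cvmx_asxx_prt_loop_t loop;
+    loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(block));
+    loop.s.int_loop |= 1ull << port; /* TX FIFO drives the RX FIFO */
+    cvmx_write_csr(CVMX_ASXX_PRT_LOOP(block), loop.u64);
+}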
+
+
+/**
+ * cvmx_asx#_rld_bypass
+ *
+ * ASX_RLD_BYPASS
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t bypass : 1; /**< When set, the rld_dll setting is bypassed with
+ ASX_RLD_BYPASS_SETTING */
+#else
+ uint64_t bypass : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_s cn38xx;
+ struct cvmx_asxx_rld_bypass_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_s cn58xx;
+ struct cvmx_asxx_rld_bypass_s cn58xxp1;
+} cvmx_asxx_rld_bypass_t;
+
+
+/**
+ * cvmx_asx#_rld_bypass_setting
+ *
+ * ASX_RLD_BYPASS_SETTING
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_setting_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< The rld_dll setting bypass value */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
+} cvmx_asxx_rld_bypass_setting_t;
+
+
+/**
+ * cvmx_asx#_rld_comp
+ *
+ * ASX_RLD_COMP
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_comp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< PCTL Compensation Value
+ These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asxx_rld_comp_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_comp_s cn58xx;
+ struct cvmx_asxx_rld_comp_s cn58xxp1;
+} cvmx_asxx_rld_comp_t;
+
+
+/**
+ * cvmx_asx#_rld_data_drv
+ *
+ * ASX_RLD_DATA_DRV
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_data_drv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+ uint64_t nctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_rld_data_drv_s cn38xx;
+ struct cvmx_asxx_rld_data_drv_s cn38xxp2;
+ struct cvmx_asxx_rld_data_drv_s cn58xx;
+ struct cvmx_asxx_rld_data_drv_s cn58xxp1;
+} cvmx_asxx_rld_data_drv_t;
+
+
+/**
+ * cvmx_asx#_rld_fcram_mode
+ *
+ * ASX_RLD_FCRAM_MODE
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_fcram_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t mode : 1; /**< Memory Mode
+ - 0: RLDRAM
+ - 1: FCRAM */
+#else
+ uint64_t mode : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xx;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
+} cvmx_asxx_rld_fcram_mode_t;
+
+
+/**
+ * cvmx_asx#_rld_nctl_strong
+ *
+ * ASX_RLD_NCTL_STRONG
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_strong_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< Duke's drive control */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
+} cvmx_asxx_rld_nctl_strong_t;
+
+
+/**
+ * cvmx_asx#_rld_nctl_weak
+ *
+ * ASX_RLD_NCTL_WEAK
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_weak_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< UNUSED (not needed for O9N) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
+} cvmx_asxx_rld_nctl_weak_t;
+
+
+/**
+ * cvmx_asx#_rld_pctl_strong
+ *
+ * ASX_RLD_PCTL_STRONG
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_strong_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< Duke's drive control */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
+} cvmx_asxx_rld_pctl_strong_t;
+
+
+/**
+ * cvmx_asx#_rld_pctl_weak
+ *
+ * ASX_RLD_PCTL_WEAK
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_weak_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< UNUSED (not needed for O9N) */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
+} cvmx_asxx_rld_pctl_weak_t;
+
+
+/**
+ * cvmx_asx#_rld_setting
+ *
+ * ASX_RLD_SETTING
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_setting_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_13_63 : 51;
+ uint64_t dfaset : 5; /**< RLD ClkGen DLL Setting(debug)
+ ** NEW O9N ** */
+ uint64_t dfalag : 1; /**< RLD ClkGen DLL Lag Error(debug)
+ ** NEW O9N ** */
+ uint64_t dfalead : 1; /**< RLD ClkGen DLL Lead Error(debug)
+ ** NEW O9N ** */
+ uint64_t dfalock : 1; /**< RLD ClkGen DLL Lock acquisition(debug)
+ ** NEW O9N ** */
+ uint64_t setting : 5; /**< RLDCK90 DLL Setting(debug) */
+#else
+ uint64_t setting : 5;
+ uint64_t dfalock : 1;
+ uint64_t dfalead : 1;
+ uint64_t dfalag : 1;
+ uint64_t dfaset : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_asxx_rld_setting_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< This is the read-only true rld dll_setting. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_setting_s cn58xx;
+ struct cvmx_asxx_rld_setting_s cn58xxp1;
+} cvmx_asxx_rld_setting_t;
+
+
+/**
+ * cvmx_asx#_rx_clk_set#
+ *
+ * ASX_RX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop RXC (RGMII receive clk)
+ * delay line, which can delay the received clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the received
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_clk_setx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop RXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rx_clk_setx_s cn30xx;
+ struct cvmx_asxx_rx_clk_setx_s cn31xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_rx_clk_setx_s cn50xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
+} cvmx_asxx_rx_clk_setx_t;
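+
+/* Illustrative sketch, not part of the generated definitions: pick SETTING
+ * from the voltage table in the note above (millivolts in, tap setting
+ * out), falling back to the fixed SETTING=24 the note suggests. Assumes the
+ * cvmx_write_csr() helper and the CVMX_ASXX_RX_CLK_SETX(index, block)
+ * address macro with that argument order.
+ */
+static inline void cvmx_example_asx_rx_clk_delay(int index, int block,
+                                                 unsigned int vdd_mv)
+{
+    cvmx_asxx_rx_clk_setx_t clk;
+    uint64_t setting;
+    if (vdd_mv <= 1000)
+        setting = 18;
+    else if (vdd_mv <= 1050)
+        setting = 19;
+    else if (vdd_mv <= 1100)
+        setting = 21;
+    else if (vdd_mv <= 1150)
+        setting = 22;
+    else if (vdd_mv <= 1200)
+        setting = 23;
+    else if (vdd_mv <= 1250)
+        setting = 24;
+    else
+        setting = 25;
+    clk.u64 = 0;
+    clk.s.setting = setting;
+    cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(index, block), clk.u64);
+}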
+
+
+/**
+ * cvmx_asx#_rx_prt_en
+ *
+ * ASX_RX_PRT_EN = RGMII Port Enable
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_prt_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_rx_prt_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xxp1;
+} cvmx_asxx_rx_prt_en_t;
+
+
+/**
+ * cvmx_asx#_rx_wol
+ *
+ * ASX_RX_WOL = RGMII RX Wake on LAN status register
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t status : 1; /**< Copy of PMCSR[15] - PME_status */
+ uint64_t enable : 1; /**< Copy of PMCSR[8] - PME_enable */
+#else
+ uint64_t enable : 1;
+ uint64_t status : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_s cn38xx;
+ struct cvmx_asxx_rx_wol_s cn38xxp2;
+} cvmx_asxx_rx_wol_t;
+
+
+/**
+ * cvmx_asx#_rx_wol_msk
+ *
+ * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_msk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t msk : 64; /**< Bytes to include in the CRC signature */
+#else
+ uint64_t msk : 64;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_msk_s cn38xx;
+ struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
+} cvmx_asxx_rx_wol_msk_t;
+
+
+/**
+ * cvmx_asx#_rx_wol_powok
+ *
+ * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_powok_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t powerok : 1; /**< Power OK */
+#else
+ uint64_t powerok : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_powok_s cn38xx;
+ struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
+} cvmx_asxx_rx_wol_powok_t;
+
+
+/**
+ * cvmx_asx#_rx_wol_sig
+ *
+ * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_sig_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t sig : 32; /**< CRC signature */
+#else
+ uint64_t sig : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_sig_s cn38xx;
+ struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
+} cvmx_asxx_rx_wol_sig_t;
+
+
+/**
+ * cvmx_asx#_tx_clk_set#
+ *
+ * ASX_TX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop TXC (RGMII transmit clk)
+ * delay line, which can delay the transmitted clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the transmitted
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_clk_setx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop TXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_tx_clk_setx_s cn30xx;
+ struct cvmx_asxx_tx_clk_setx_s cn31xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_tx_clk_setx_s cn50xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
+} cvmx_asxx_tx_clk_setx_t;
+
+
+/**
+ * cvmx_asx#_tx_comp_byp
+ *
+ * ASX_TX_COMP_BYP = RGMII TX Compensation Bypass
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_comp_byp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_asxx_tx_comp_byp_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t bypass : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
+ struct cvmx_asxx_tx_comp_byp_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t bypass : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn50xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_13_63 : 51;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn58xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
+} cvmx_asxx_tx_comp_byp_t;
+
+
+/**
+ * cvmx_asx#_tx_hi_water#
+ *
+ * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_hi_waterx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t mark : 4; /**< TX FIFO HiWatermark to stall GMX
+ Value of 0 maps to 16
+ Reset value changed from 10 in pass1
+ Pass1 settings (assuming 125 tclk)
+ - 325-375: 12
+ - 375-437: 11
+ - 437-550: 10
+ - 550-687: 9 */
+#else
+ uint64_t mark : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t mark : 3; /**< TX FIFO HiWatermark to stall GMX
+ Value 0 maps to 8. */
+#else
+ uint64_t mark : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
+} cvmx_asxx_tx_hi_waterx_t;
+
+
+/**
+ * cvmx_asx#_tx_prt_en
+ *
+ * ASX_TX_PRT_EN = RGMII Port Enable
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_prt_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_prt_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xxp1;
+} cvmx_asxx_tx_prt_en_t;
+
+
+/**
+ * cvmx_asx0_dbg_data_drv
+ *
+ * ASX_DBG_DATA_DRV
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_drv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_drv_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asx0_dbg_data_drv_cn38xx cn38xxp2;
+ struct cvmx_asx0_dbg_data_drv_s cn58xx;
+ struct cvmx_asx0_dbg_data_drv_s cn58xxp1;
+} cvmx_asx0_dbg_data_drv_t;
+
+
+/**
+ * cvmx_asx0_dbg_data_enable
+ *
+ * ASX_DBG_DATA_ENABLE
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_enable_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< A 1->0 transition turns the dbg interface OFF. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_enable_s cn38xx;
+ struct cvmx_asx0_dbg_data_enable_s cn38xxp2;
+ struct cvmx_asx0_dbg_data_enable_s cn58xx;
+ struct cvmx_asx0_dbg_data_enable_s cn58xxp1;
+} cvmx_asx0_dbg_data_enable_t;
+
+
+/**
+ * cvmx_ciu_bist
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t bist : 4; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu_bist_s cn30xx;
+ struct cvmx_ciu_bist_s cn31xx;
+ struct cvmx_ciu_bist_s cn38xx;
+ struct cvmx_ciu_bist_s cn38xxp2;
+ struct cvmx_ciu_bist_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t bist : 2; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_bist_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t bist : 3; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ BIST. */
+#else
+ uint64_t bist : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_bist_cn52xx cn52xxp1;
+ struct cvmx_ciu_bist_s cn56xx;
+ struct cvmx_ciu_bist_s cn56xxp1;
+ struct cvmx_ciu_bist_s cn58xx;
+ struct cvmx_ciu_bist_s cn58xxp1;
+} cvmx_ciu_bist_t;
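+
+/*
+ * A minimal post-reset check of the BIST results, assuming the
+ * CVMX_CIU_BIST address macro and cvmx_read_csr() from elsewhere in
+ * the SDK; any set bit marks a memory that failed BIST:
+ *
+ * @verbatim
+ * cvmx_ciu_bist_t bist;
+ * bist.u64 = cvmx_read_csr(CVMX_CIU_BIST);
+ * if (bist.s.bist != 0)
+ *     printf("CIU BIST failure mask: 0x%x\n", (int)bist.s.bist);
+ * @endverbatim
+ */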
+
+
+/**
+ * cvmx_ciu_dint
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_dint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t dint : 16; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_dint_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t dint : 1; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_dint_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t dint : 2; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_dint_s cn38xx;
+ struct cvmx_ciu_dint_s cn38xxp2;
+ struct cvmx_ciu_dint_cn31xx cn50xx;
+ struct cvmx_ciu_dint_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t dint : 4; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_dint_cn52xx cn52xxp1;
+ struct cvmx_ciu_dint_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t dint : 12; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_dint_cn56xx cn56xxp1;
+ struct cvmx_ciu_dint_s cn58xx;
+ struct cvmx_ciu_dint_s cn58xxp1;
+} cvmx_ciu_dint_t;
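+
+/*
+ * A sketch of pulsing the debug interrupt to one core, assuming the
+ * CVMX_CIU_DINT address macro and cvmx_write_csr() from elsewhere in
+ * the SDK; DINT holds one bit per PP and the write delivers the pulse:
+ *
+ * @verbatim
+ * cvmx_ciu_dint_t dint;
+ * dint.u64 = 0;
+ * dint.s.dint = 1ull << core;   // 'core' is the target PP number
+ * cvmx_write_csr(CVMX_CIU_DINT, dint.u64);
+ * @endverbatim
+ */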
+
+
+/**
+ * cvmx_ciu_fuse
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_fuse_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t fuse : 16; /**< Physical PP is present */
+#else
+ uint64_t fuse : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_fuse_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t fuse : 1; /**< Physical PP is present */
+#else
+ uint64_t fuse : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_fuse_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t fuse : 2; /**< Physical PP is present */
+#else
+ uint64_t fuse : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_fuse_s cn38xx;
+ struct cvmx_ciu_fuse_s cn38xxp2;
+ struct cvmx_ciu_fuse_cn31xx cn50xx;
+ struct cvmx_ciu_fuse_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t fuse : 4; /**< Physical PP is present */
+#else
+ uint64_t fuse : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_fuse_cn52xx cn52xxp1;
+ struct cvmx_ciu_fuse_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t fuse : 12; /**< Physical PP is present */
+#else
+ uint64_t fuse : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_fuse_cn56xx cn56xxp1;
+ struct cvmx_ciu_fuse_s cn58xx;
+ struct cvmx_ciu_fuse_s cn58xxp1;
+} cvmx_ciu_fuse_t;
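+
+/*
+ * FUSE is a bitmask of physically present PPs, so counting the set bits
+ * gives the core count. A sketch, assuming the CVMX_CIU_FUSE address
+ * macro and cvmx_read_csr() from elsewhere in the SDK:
+ *
+ * @verbatim
+ * cvmx_ciu_fuse_t fuse;
+ * uint64_t mask;
+ * int cores = 0;
+ * fuse.u64 = cvmx_read_csr(CVMX_CIU_FUSE);
+ * for (mask = fuse.s.fuse; mask; mask >>= 1)
+ *     cores += mask & 1;        // population count of FUSE bits
+ * @endverbatim
+ */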
+
+
+/**
+ * cvmx_ciu_gstop
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_gstop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t gstop : 1; /**< GSTOP bit */
+#else
+ uint64_t gstop : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_gstop_s cn30xx;
+ struct cvmx_ciu_gstop_s cn31xx;
+ struct cvmx_ciu_gstop_s cn38xx;
+ struct cvmx_ciu_gstop_s cn38xxp2;
+ struct cvmx_ciu_gstop_s cn50xx;
+ struct cvmx_ciu_gstop_s cn52xx;
+ struct cvmx_ciu_gstop_s cn52xxp1;
+ struct cvmx_ciu_gstop_s cn56xx;
+ struct cvmx_ciu_gstop_s cn56xxp1;
+ struct cvmx_ciu_gstop_s cn58xx;
+ struct cvmx_ciu_gstop_s cn58xxp1;
+} cvmx_ciu_gstop_t;
+
+
+/**
+ * cvmx_ciu_int#_en0
+ *
+ * Notes:
+ * CIU_INT0_EN0: PP0 /IP2
+ * CIU_INT1_EN0: PP0 /IP3
+ * ...
+ * CIU_INT6_EN0: PP3/IP2
+ * CIU_INT7_EN0: PP3/IP3
+ * (hole)
+ * CIU_INT32_EN0: PCI /IP
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en0_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en0_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_en0_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en0_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+} cvmx_ciu_intx_en0_t;
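+
+/*
+ * Per the Notes above, the register index n selects the target PP and IP
+ * level (n = 2*pp for IP2, n = 2*pp + 1 for IP3, n = 32 for PCI). A sketch
+ * that routes all POW work-queue groups and timer 0 to a core's IP2,
+ * assuming the CVMX_CIU_INTX_EN0(n) address macro and the cvmx_read_csr()/
+ * cvmx_write_csr() accessors from elsewhere in the SDK:
+ *
+ * @verbatim
+ * int n = pp * 2;                       // even index -> IP2
+ * cvmx_ciu_intx_en0_t en0;
+ * en0.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(n));
+ * en0.s.workq = 0xffff;                 // all 16 work queue groups
+ * en0.s.timer |= 1;                     // general timer 0
+ * cvmx_write_csr(CVMX_CIU_INTX_EN0(n), en0.u64);
+ * @endverbatim
+ */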
+
+
+/**
+ * cvmx_ciu_int#_en0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN0 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en0_w1c_t;
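+
+/*
+ * The W1C register (and the W1S twin below) lets one enable bit be changed
+ * without a read-modify-write: only the bits written as 1 are cleared (or
+ * set); all other bits are untouched. A sketch, assuming the
+ * CVMX_CIU_INTX_EN0_W1C(n) address macro and cvmx_write_csr() from
+ * elsewhere in the SDK:
+ *
+ * @verbatim
+ * cvmx_ciu_intx_en0_w1c_t w1c;
+ * w1c.u64 = 0;
+ * w1c.s.rml = 1;                 // clear only the RML enable
+ * cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(n), w1c.u64);
+ * @endverbatim
+ */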
+
+
+/**
+ * cvmx_ciu_int#_en0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN0 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en0_w1s_t;
+
+
+/**
+ * cvmx_ciu_int#_en1
+ *
+ * Notes:
+ * @verbatim
+ * PPx/IP2 will be raised when...
+ *
+ * n = x*2
+ * PPx/IP2 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PPx/IP3 will be raised when...
+ *
+ * n = x*2 + 1
+ * PPx/IP3 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PCI/INT will be raised when...
+ *
+ * PCI/INT = |([CIU_INT_SUM1, CIU_INT32_SUM0] & [CIU_INT32_EN1, CIU_INT32_EN0])
+ * @endverbatim
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t wdog : 1; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en1_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en1_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en1_cn31xx cn50xx;
+ struct cvmx_ciu_intx_en1_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_cn52xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en1_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+} cvmx_ciu_intx_en1_t;
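+
+/*
+ * The Notes formula above can be evaluated in software to see whether a
+ * core's IP2 line is (or would be) asserted: the line is the OR of all
+ * (summary & enable) bits across the SUM0/EN0 and SUM1/EN1 pairs. A
+ * sketch, assuming the CVMX_CIU_INTX_SUM0(n), CVMX_CIU_INT_SUM1,
+ * CVMX_CIU_INTX_EN0(n) and CVMX_CIU_INTX_EN1(n) address macros and
+ * cvmx_read_csr() from elsewhere in the SDK:
+ *
+ * @verbatim
+ * int n = pp * 2;                       // n = x*2 selects PPx/IP2
+ * uint64_t pend0 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(n)) &
+ *                  cvmx_read_csr(CVMX_CIU_INTX_EN0(n));
+ * uint64_t pend1 = cvmx_read_csr(CVMX_CIU_INT_SUM1) &
+ *                  cvmx_read_csr(CVMX_CIU_INTX_EN1(n));
+ * int ip2_asserted = (pend0 | pend1) != 0;
+ * @endverbatim
+ */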
+
+
+/**
+ * cvmx_ciu_int#_en1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN1 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1c_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en1_w1c_t;
+
+
+/**
+ * cvmx_ciu_int#_en1_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN1 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1s_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en1_w1s_t;
+
+
+/**
+ * cvmx_ciu_int#_en4_0
+ *
+ * Notes:
+ * CIU_INT0_EN4_0: PP0 /IP4
+ * CIU_INT1_EN4_0: PP1 /IP4
+ * ...
+ * CIU_INT11_EN4_0: PP11 /IP4
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en4_0_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_0_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+} cvmx_ciu_intx_en4_0_t;
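+
+/*
+ * Unlike the EN0 pair above, EN4_0 is indexed by the PP number directly
+ * (CIU_INTx_EN4_0: PPx/IP4, per the Notes). A sketch that enables timer 3
+ * on a core's IP4, assuming the CVMX_CIU_INTX_EN4_0(pp) address macro and
+ * the cvmx_read_csr()/cvmx_write_csr() accessors from elsewhere in the SDK:
+ *
+ * @verbatim
+ * cvmx_ciu_intx_en4_0_t en4;
+ * en4.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN4_0(pp));
+ * en4.s.timer |= 1 << 3;         // route general timer 3 to IP4
+ * cvmx_write_csr(CVMX_CIU_INTX_EN4_0(pp), en4.u64);
+ * @endverbatim
+ */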
+
+
+/**
+ * cvmx_ciu_int#_en4_0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN4_0 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en4_0_w1c_t;
+
+
+/**
+ * cvmx_ciu_int#_en4_0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN4_0 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en4_0_w1s_t;
+
+
+/**
+ * cvmx_ciu_int#_en4_1
+ *
+ * Notes:
+ * PPx/IP4 will be raised when...
+ * PPx/IP4 = |([CIU_INT_SUM1, CIU_INTx_SUM4] & [CIU_INTx_EN4_1, CIU_INTx_EN4_0])
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_1_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_cn52xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en4_1_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_1_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+} cvmx_ciu_intx_en4_1_t;
+
+
+/**
+ * cvmx_ciu_int#_en4_1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN4_1 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en4_1_w1c_t;
+
+
+/**
+ * cvmx_ciu_int#_en4_1_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN4_1 register
+ * (Pass2 ONLY)
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+} cvmx_ciu_intx_en4_1_w1s_t;
+
+
+/**
+ * cvmx_ciu_int#_sum0
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-31.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_sum0_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-1.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_sum0_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-3.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_sum0_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-31.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_sum0_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_INT_SUM1 bit is set and corresponding
+ enable bit in CIU_INTx_EN is set, where x
+ is the same as x in this CIU_INTx_SUM0.
+ PPs use CIU_INTx_SUM0 where x=0-7.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3
+ Note that WDOG_SUM only summarizes the SUM/EN1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-7
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum0_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM0 where x=0-23.
+ PCI uses the CIU_INTx_SUM0 where x=32.
+ Even INTx registers report WDOG to IP2
+ Odd INTx registers report WDOG to IP3 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-23
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+} cvmx_ciu_intx_sum0_t;
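+
+/*
+ * Usage sketch: decoding a CIU_INT*_SUM0 value through the typedef
+ * above. This is a minimal example, assuming the cvmx_read_csr()
+ * accessor and the CVMX_CIU_INTX_SUM0() address macro provided by the
+ * companion cvmx-csr.h headers; it is not part of the generated output.
+ */
+static inline int cvmx_ciu_sum0_mbox_pending(int intx)
+{
+    cvmx_ciu_intx_sum0_t sum0;
+    /* Read the raw 64-bit CSR, then view it through the bitfields. */
+    sum0.u64 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(intx));
+    /* MBOX is two bits: <33> is the or of <31:16>, <32> of <15:0>. */
+    return sum0.s.mbox != 0;
+}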
+
+
+/**
+ * cvmx_ciu_int#_sum4
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ These registers report WDOG to IP4 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_sum4_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ PPs use CIU_INTx_SUM4 where x=0-1. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_sum4_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< SUM1&EN4_1 summary bit
+ This read-only bit reads as a one whenever any
+ CIU_INT_SUM1 bit is set and corresponding
+ enable bit in CIU_INTx_EN4_1 is set, where x
+ is the same as x in this CIU_INTx_SUM4.
+ PPs use CIU_INTx_SUM4 for IP4, where x=PPid.
+ Note that WDOG_SUM only summarizes the SUM/EN4_1
+ result and does not have a corresponding enable
+ bit, so does not directly contribute to
+ interrupts. */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-3
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum4_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ These registers report WDOG to IP4 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ Refer to "Receiving Message-Signalled
+ Interrupts" in the PCIe chapter of the spec */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D
+ Refer to "Receiving Emulated INTA/INTB/
+ INTC/INTD" in the PCIe chapter of the spec */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11
+ [33] is the or of <31:16>
+ [32] is the or of <15:0> */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum4_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt
+ KEY_ZERO will be set when the external ZERO_KEYS
+ pin is sampled high. KEY_ZERO is cleared by SW */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t wdog_sum : 1; /**< Watchdog summary
+ These registers report WDOG to IP4 */
+ uint64_t pci_msi : 4; /**< PCI MSI
+ [43] is the or of <63:48>
+ [42] is the or of <47:32>
+ [41] is the or of <31:16>
+ [40] is the or of <15:0> */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31
+ [33] is the or of <31:16>
+ [32] is the or of <15:0>
+ Two PCI internal interrupts for entry 32
+ CIU_PCI_INTA */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts
+ 1 bit/group. A copy of the R/W1C bit in the POW. */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t wdog_sum : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+} cvmx_ciu_intx_sum4_t;
+
+
+/**
+ * cvmx_ciu_int_sum1
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_int_sum1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< 16 watchdog interrupts */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_ciu_int_sum1_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t wdog : 1; /**< 1 watchdog interrupt */
+#else
+ uint64_t wdog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_int_sum1_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< 2 watchdog interrupts */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_int_sum1_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< 16 watchdog interrupts */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
+ struct cvmx_ciu_int_sum1_cn31xx cn50xx;
+ struct cvmx_ciu_int_sum1_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< 4 watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_int_sum1_cn52xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< 4 watchdog interrupts */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_int_sum1_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< 12 watchdog interrupts */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+} cvmx_ciu_int_sum1_t;
+
+
+/**
+ * cvmx_ciu_mbox_clr#
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_mbox_clrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t bits : 32; /**< On writes, clear the corresponding bits in the MBOX
+ register; on reads, return the MBOX register */
+#else
+ uint64_t bits : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_mbox_clrx_s cn30xx;
+ struct cvmx_ciu_mbox_clrx_s cn31xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xxp2;
+ struct cvmx_ciu_mbox_clrx_s cn50xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn56xx;
+ struct cvmx_ciu_mbox_clrx_s cn56xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn58xx;
+ struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+} cvmx_ciu_mbox_clrx_t;
+
+
+/**
+ * cvmx_ciu_mbox_set#
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_mbox_setx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t bits : 32; /**< On writes, set the corresponding bits in the MBOX
+ register; on reads, return the MBOX register */
+#else
+ uint64_t bits : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_ciu_mbox_setx_s cn30xx;
+ struct cvmx_ciu_mbox_setx_s cn31xx;
+ struct cvmx_ciu_mbox_setx_s cn38xx;
+ struct cvmx_ciu_mbox_setx_s cn38xxp2;
+ struct cvmx_ciu_mbox_setx_s cn50xx;
+ struct cvmx_ciu_mbox_setx_s cn52xx;
+ struct cvmx_ciu_mbox_setx_s cn52xxp1;
+ struct cvmx_ciu_mbox_setx_s cn56xx;
+ struct cvmx_ciu_mbox_setx_s cn56xxp1;
+ struct cvmx_ciu_mbox_setx_s cn58xx;
+ struct cvmx_ciu_mbox_setx_s cn58xxp1;
+} cvmx_ciu_mbox_setx_t;
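+
+/*
+ * Usage sketch: a write to CIU_MBOX_SET sets mailbox bits for a core
+ * and a write to CIU_MBOX_CLR clears them, per the field descriptions
+ * above. Assumes cvmx_write_csr() and the CVMX_CIU_MBOX_SETX()/
+ * CVMX_CIU_MBOX_CLRX() address macros from the companion headers.
+ */
+static inline void cvmx_ciu_mbox_signal(int core, uint32_t bits)
+{
+    /* Sender: set the requested mailbox bits for the target core. */
+    cvmx_write_csr(CVMX_CIU_MBOX_SETX(core), bits);
+}
+
+static inline void cvmx_ciu_mbox_ack(int core, uint32_t bits)
+{
+    /* Receiver: clear the bits that have been serviced. */
+    cvmx_write_csr(CVMX_CIU_MBOX_CLRX(core), bits);
+}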
+
+
+/**
+ * cvmx_ciu_nmi
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_nmi_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t nmi : 16; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_nmi_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t nmi : 1; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_nmi_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t nmi : 2; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_nmi_s cn38xx;
+ struct cvmx_ciu_nmi_s cn38xxp2;
+ struct cvmx_ciu_nmi_cn31xx cn50xx;
+ struct cvmx_ciu_nmi_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t nmi : 4; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_nmi_cn52xx cn52xxp1;
+ struct cvmx_ciu_nmi_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t nmi : 12; /**< Send NMI pulse to PP vector */
+#else
+ uint64_t nmi : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_nmi_cn56xx cn56xxp1;
+ struct cvmx_ciu_nmi_s cn58xx;
+ struct cvmx_ciu_nmi_s cn58xxp1;
+} cvmx_ciu_nmi_t;
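+
+/*
+ * Usage sketch: CIU_NMI is a per-PP pulse vector, so writing bit n
+ * sends an NMI pulse to core n. Assumes cvmx_write_csr() and a
+ * CVMX_CIU_NMI address constant from the companion headers.
+ */
+static inline void cvmx_ciu_send_nmi(int core)
+{
+    cvmx_ciu_nmi_t nmi;
+    nmi.u64 = 0;
+    nmi.s.nmi = 1ull << core;   /* core must fit the chip's NMI width */
+    cvmx_write_csr(CVMX_CIU_NMI, nmi.u64);
+}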
+
+
+/**
+ * cvmx_ciu_pci_inta
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_pci_inta_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t intr : 2; /**< PCI interrupt
+ These bits are observed in CIU_INT32_SUM0<33:32> */
+#else
+ uint64_t intr : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_ciu_pci_inta_s cn30xx;
+ struct cvmx_ciu_pci_inta_s cn31xx;
+ struct cvmx_ciu_pci_inta_s cn38xx;
+ struct cvmx_ciu_pci_inta_s cn38xxp2;
+ struct cvmx_ciu_pci_inta_s cn50xx;
+ struct cvmx_ciu_pci_inta_s cn52xx;
+ struct cvmx_ciu_pci_inta_s cn52xxp1;
+ struct cvmx_ciu_pci_inta_s cn56xx;
+ struct cvmx_ciu_pci_inta_s cn56xxp1;
+ struct cvmx_ciu_pci_inta_s cn58xx;
+ struct cvmx_ciu_pci_inta_s cn58xxp1;
+} cvmx_ciu_pci_inta_t;
+
+
+/**
+ * cvmx_ciu_pp_dbg
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_pp_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t ppdbg : 16; /**< Debug[DM] value for each PP;
+ indicates whether each PP is in debug mode or not */
+#else
+ uint64_t ppdbg : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_pp_dbg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t ppdbg : 1; /**< Debug[DM] value for each PP;
+ indicates whether each PP is in debug mode or not */
+#else
+ uint64_t ppdbg : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_pp_dbg_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t ppdbg : 2; /**< Debug[DM] value for each PP;
+ indicates whether each PP is in debug mode or not */
+#else
+ uint64_t ppdbg : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_pp_dbg_s cn38xx;
+ struct cvmx_ciu_pp_dbg_s cn38xxp2;
+ struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
+ struct cvmx_ciu_pp_dbg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t ppdbg : 4; /**< Debug[DM] value for each PP;
+ indicates whether each PP is in debug mode or not */
+#else
+ uint64_t ppdbg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_dbg_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t ppdbg : 12; /**< Debug[DM] value for each PP;
+ indicates whether each PP is in debug mode or not */
+#else
+ uint64_t ppdbg : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_dbg_s cn58xx;
+ struct cvmx_ciu_pp_dbg_s cn58xxp1;
+} cvmx_ciu_pp_dbg_t;
+
+
+/**
+ * cvmx_ciu_pp_poke#
+ *
+ * Notes:
+ * Any write to a CIU_PP_POKE register clears any pending interrupt generated
+ * by the associated watchdog, resets the CIU_WDOG[STATE] field, and sets
+ * CIU_WDOG[CNT] to (CIU_WDOG[LEN] << 8).
+ *
+ * Reads of this register return the associated CIU_WDOG register.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_pp_pokex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t poke : 64; /**< Reserved */
+#else
+ uint64_t poke : 64;
+#endif
+ } s;
+ struct cvmx_ciu_pp_pokex_s cn30xx;
+ struct cvmx_ciu_pp_pokex_s cn31xx;
+ struct cvmx_ciu_pp_pokex_s cn38xx;
+ struct cvmx_ciu_pp_pokex_s cn38xxp2;
+ struct cvmx_ciu_pp_pokex_s cn50xx;
+ struct cvmx_ciu_pp_pokex_s cn52xx;
+ struct cvmx_ciu_pp_pokex_s cn52xxp1;
+ struct cvmx_ciu_pp_pokex_s cn56xx;
+ struct cvmx_ciu_pp_pokex_s cn56xxp1;
+ struct cvmx_ciu_pp_pokex_s cn58xx;
+ struct cvmx_ciu_pp_pokex_s cn58xxp1;
+} cvmx_ciu_pp_pokex_t;
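+
+/*
+ * Usage sketch of the poke protocol in the notes above: any write
+ * clears a pending watchdog interrupt, resets CIU_WDOG[STATE] and
+ * reloads CIU_WDOG[CNT] from CIU_WDOG[LEN]. Assumes cvmx_write_csr()
+ * and a CVMX_CIU_PP_POKEX() address macro from the companion headers.
+ */
+static inline void cvmx_ciu_poke_watchdog(int core)
+{
+    /* The written value is ignored; the write itself is the poke. */
+    cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 0);
+}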
+
+
+/**
+ * cvmx_ciu_pp_rst
+ *
+ * Contains the reset control for each PP. A value of '1' holds a PP in reset; '0' releases it.
+ * Resets to 0xffff when PCI boot is enabled, 0xfffe otherwise.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_pp_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t rst : 15; /**< PP Rst for PPs 15-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 15;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_pp_rst_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_pp_rst_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t rst : 1; /**< PP Rst for PP1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_pp_rst_s cn38xx;
+ struct cvmx_ciu_pp_rst_s cn38xxp2;
+ struct cvmx_ciu_pp_rst_cn31xx cn50xx;
+ struct cvmx_ciu_pp_rst_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t rst : 3; /**< PP Rst for PPs 3-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 3;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_rst_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t rst : 11; /**< PP Rst for PPs 11-1 */
+ uint64_t rst0 : 1; /**< PP Rst for PP0
+ depends on standalone mode */
+#else
+ uint64_t rst0 : 1;
+ uint64_t rst : 11;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_rst_s cn58xx;
+ struct cvmx_ciu_pp_rst_s cn58xxp1;
+} cvmx_ciu_pp_rst_t;
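+
+/*
+ * Usage sketch: releasing a secondary PP from reset with a
+ * read-modify-write, per the description above ('1' holds a PP in
+ * reset, '0' releases it). Assumes cvmx_read_csr()/cvmx_write_csr()
+ * and a CVMX_CIU_PP_RST address constant from the companion headers.
+ */
+static inline void cvmx_ciu_release_core(int core)
+{
+    uint64_t rst = cvmx_read_csr(CVMX_CIU_PP_RST);
+    rst &= ~(1ull << core);     /* clear the per-PP reset bit */
+    cvmx_write_csr(CVMX_CIU_PP_RST, rst);
+}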
+
+
+/**
+ * cvmx_ciu_qlm_dcok
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_qlm_dcok_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t qlm_dcok : 4; /**< Re-assert dcok for each QLM. The value in this
+ field is "anded" with the pll_dcok pin and then
+ sent to each QLM (0..3). */
+#else
+ uint64_t qlm_dcok : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_dcok_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t qlm_dcok : 2; /**< Re-assert dcok for each QLM. The value in this
+ field is "anded" with the pll_dcok pin and then
+ sent to each QLM (0..1). */
+#else
+ uint64_t qlm_dcok : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_dcok_s cn56xx;
+ struct cvmx_ciu_qlm_dcok_s cn56xxp1;
+} cvmx_ciu_qlm_dcok_t;
+
+
+/**
+ * cvmx_ciu_qlm_jtgc
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mux_sel : 2; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t bypass : 4; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 4;
+ uint64_t mux_sel : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_jtgc_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is
+ divided by 2^(CLK_DIV + 2) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t mux_sel : 1; /**< Selects which QLM JTAG shift out is shifted into
+ the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */
+ uint64_t reserved_2_3 : 2;
+ uint64_t bypass : 2; /**< Selects which QLM JTAG shift chains are bypassed
+ by the QLM JTAG data register (CIU_QLM_JTGD) (one
+ bit per QLM) */
+#else
+ uint64_t bypass : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mux_sel : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t clk_div : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgc_s cn56xx;
+ struct cvmx_ciu_qlm_jtgc_s cn56xxp1;
+} cvmx_ciu_qlm_jtgc_t;
+
+
+/**
+ * cvmx_ciu_qlm_jtgd
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_44_60 : 17;
+ uint64_t select : 4; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 4;
+ uint64_t reserved_44_60 : 17;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } s;
+ struct cvmx_ciu_qlm_jtgd_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_42_60 : 19;
+ uint64_t select : 2; /**< Selects which QLM JTAG shift chains the JTAG
+ operations are performed on */
+ uint64_t reserved_37_39 : 3;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t select : 2;
+ uint64_t reserved_42_60 : 19;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgd_s cn56xx;
+ struct cvmx_ciu_qlm_jtgd_cn56xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when
+ op completes) */
+ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when
+ op completes) */
+ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when
+ op completes) */
+ uint64_t reserved_37_60 : 24;
+ uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */
+ uint64_t shft_reg : 32; /**< QLM JTAG shift register */
+#else
+ uint64_t shft_reg : 32;
+ uint64_t shft_cnt : 5;
+ uint64_t reserved_37_60 : 24;
+ uint64_t update : 1;
+ uint64_t shift : 1;
+ uint64_t capture : 1;
+#endif
+ } cn56xxp1;
+} cvmx_ciu_qlm_jtgd_t;
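+
+/*
+ * Usage sketch: one QLM JTAG shift through CIU_QLM_JTGD. SHFT_CNT is
+ * encoded in -1 notation and SHIFT self-clears when the operation
+ * completes, so the caller spins on it. SELECT is treated here as one
+ * bit per QLM chain, which is an assumption of this sketch; also
+ * assumes a CVMX_CIU_QLM_JTGD address constant from the companion
+ * headers.
+ */
+static inline uint32_t cvmx_ciu_qlm_jtag_shift(int qlm, int bits, uint32_t data)
+{
+    cvmx_ciu_qlm_jtgd_t jtgd;
+    jtgd.u64 = 0;
+    jtgd.s.shift = 1;
+    jtgd.s.select = 1 << qlm;
+    jtgd.s.shft_cnt = bits - 1;      /* -1 encoded shift count */
+    jtgd.s.shft_reg = data;
+    cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+    do
+        jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+    while (jtgd.s.shift);            /* SHIFT clears when done */
+    return jtgd.s.shft_reg;
+}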
+
+
+/**
+ * cvmx_ciu_soft_bist
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_soft_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_bist : 1; /**< Run BIST on soft reset. */
+#else
+ uint64_t soft_bist : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_bist_s cn30xx;
+ struct cvmx_ciu_soft_bist_s cn31xx;
+ struct cvmx_ciu_soft_bist_s cn38xx;
+ struct cvmx_ciu_soft_bist_s cn38xxp2;
+ struct cvmx_ciu_soft_bist_s cn50xx;
+ struct cvmx_ciu_soft_bist_s cn52xx;
+ struct cvmx_ciu_soft_bist_s cn52xxp1;
+ struct cvmx_ciu_soft_bist_s cn56xx;
+ struct cvmx_ciu_soft_bist_s cn56xxp1;
+ struct cvmx_ciu_soft_bist_s cn58xx;
+ struct cvmx_ciu_soft_bist_s cn58xxp1;
+} cvmx_ciu_soft_bist_t;
+
+
+/**
+ * cvmx_ciu_soft_prst
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t host64 : 1; /**< PCX Host Mode Device Capability (0=32b/1=64b) */
+ uint64_t npi : 1; /**< When PCI soft reset is asserted, also reset the
+ NPI and PNI logic */
+ uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is
+ configured as a HOST. When OCTEON is a PCI host
+ (i.e. when PCI_HOST_MODE = 1), this controls
+ PCI_RST_L. Refer to section 10.11.1. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t npi : 1;
+ uint64_t host64 : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst_s cn30xx;
+ struct cvmx_ciu_soft_prst_s cn31xx;
+ struct cvmx_ciu_soft_prst_s cn38xx;
+ struct cvmx_ciu_soft_prst_s cn38xxp2;
+ struct cvmx_ciu_soft_prst_s cn50xx;
+ struct cvmx_ciu_soft_prst_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is
+ configured as a HOST. When OCTEON is a PCI host
+ (i.e. when PCI_HOST_MODE = 1), this controls
+ PCI_RST_L. Refer to section 10.11.1. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
+ struct cvmx_ciu_soft_prst_s cn58xx;
+ struct cvmx_ciu_soft_prst_s cn58xxp1;
+} cvmx_ciu_soft_prst_t;
+
+
+/**
+ * cvmx_ciu_soft_prst1
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is
+ configured as a HOST. When OCTEON is a PCI host
+ (i.e. when PCI_HOST_MODE = 1), this controls
+ PCI_RST_L. Refer to section 10.11.1. */
+#else
+ uint64_t soft_prst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_prst1_s cn52xx;
+ struct cvmx_ciu_soft_prst1_s cn52xxp1;
+ struct cvmx_ciu_soft_prst1_s cn56xx;
+ struct cvmx_ciu_soft_prst1_s cn56xxp1;
+} cvmx_ciu_soft_prst1_t;
+
+
+/**
+ * cvmx_ciu_soft_rst
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_soft_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< Resets Octeon
+ When soft resetting Octeon from a remote PCI host,
+ always read CIU_SOFT_RST (and wait for result)
+ before writing SOFT_RST to '1'. */
+#else
+ uint64_t soft_rst : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_soft_rst_s cn30xx;
+ struct cvmx_ciu_soft_rst_s cn31xx;
+ struct cvmx_ciu_soft_rst_s cn38xx;
+ struct cvmx_ciu_soft_rst_s cn38xxp2;
+ struct cvmx_ciu_soft_rst_s cn50xx;
+ struct cvmx_ciu_soft_rst_s cn52xx;
+ struct cvmx_ciu_soft_rst_s cn52xxp1;
+ struct cvmx_ciu_soft_rst_s cn56xx;
+ struct cvmx_ciu_soft_rst_s cn56xxp1;
+ struct cvmx_ciu_soft_rst_s cn58xx;
+ struct cvmx_ciu_soft_rst_s cn58xxp1;
+} cvmx_ciu_soft_rst_t;
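+
+/*
+ * Usage sketch of the sequence required by the field note above: read
+ * CIU_SOFT_RST (and wait for the result) before writing SOFT_RST to
+ * '1'. Assumes cvmx_read_csr()/cvmx_write_csr() and a
+ * CVMX_CIU_SOFT_RST address constant from the companion headers.
+ */
+static inline void cvmx_ciu_soft_reset(void)
+{
+    cvmx_ciu_soft_rst_t rst;
+    /* The read drains outstanding traffic before the reset write. */
+    rst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_RST);
+    rst.s.soft_rst = 1;
+    cvmx_write_csr(CVMX_CIU_SOFT_RST, rst.u64);
+}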
+
+
+/**
+ * cvmx_ciu_tim#
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_timx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_37_63 : 27;
+ uint64_t one_shot : 1; /**< One-shot mode */
+ uint64_t len : 36; /**< Timeout length in core clock cycles
+ Periodic interrupts will occur every LEN+1 core
+ clock cycles when ONE_SHOT==0
+ Timer disabled when LEN==0 */
+#else
+ uint64_t len : 36;
+ uint64_t one_shot : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+ struct cvmx_ciu_timx_s cn30xx;
+ struct cvmx_ciu_timx_s cn31xx;
+ struct cvmx_ciu_timx_s cn38xx;
+ struct cvmx_ciu_timx_s cn38xxp2;
+ struct cvmx_ciu_timx_s cn50xx;
+ struct cvmx_ciu_timx_s cn52xx;
+ struct cvmx_ciu_timx_s cn52xxp1;
+ struct cvmx_ciu_timx_s cn56xx;
+ struct cvmx_ciu_timx_s cn56xxp1;
+ struct cvmx_ciu_timx_s cn58xx;
+ struct cvmx_ciu_timx_s cn58xxp1;
+} cvmx_ciu_timx_t;
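+
+/*
+ * Usage sketch: arming a general-purpose timer from the LEN semantics
+ * above; a periodic interrupt fires every LEN+1 core clocks when
+ * ONE_SHOT==0, and LEN==0 disables the timer, so 'cycles' must be at
+ * least 2. Assumes cvmx_write_csr() and a CVMX_CIU_TIMX() address
+ * macro from the companion headers.
+ */
+static inline void cvmx_ciu_start_periodic_timer(int tim, uint64_t cycles)
+{
+    cvmx_ciu_timx_t timx;
+    timx.u64 = 0;
+    timx.s.one_shot = 0;             /* periodic mode */
+    timx.s.len = cycles - 1;         /* interrupt every LEN+1 clocks */
+    cvmx_write_csr(CVMX_CIU_TIMX(tim), timx.u64);
+}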
+
+
+/**
+ * cvmx_ciu_wdog#
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_ciu_wdogx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_46_63 : 18;
+ uint64_t gstopen : 1; /**< GSTOPEN */
+ uint64_t dstop : 1; /**< DSTOP */
+ uint64_t cnt : 24; /**< Number of 256-cycle intervals until next watchdog
+ expiration. Cleared on write to associated
+ CIU_PP_POKE register. */
+ uint64_t len : 16; /**< Watchdog time expiration length
+ The 16 bits of LEN represent the most significant
+ bits of a 24 bit decrementer that decrements
+ every 256 cycles.
+ LEN must be set > 0 */
+ uint64_t state : 2; /**< Watchdog state
+ number of watchdog time expirations since last
+ PP poke. Cleared on write to associated
+ CIU_PP_POKE register. */
+ uint64_t mode : 2; /**< Watchdog mode
+ 0 = Off
+ 1 = Interrupt Only
+ 2 = Interrupt + NMI
+ 3 = Interrupt + NMI + Soft-Reset */
+#else
+ uint64_t mode : 2;
+ uint64_t state : 2;
+ uint64_t len : 16;
+ uint64_t cnt : 24;
+ uint64_t dstop : 1;
+ uint64_t gstopen : 1;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+ struct cvmx_ciu_wdogx_s cn30xx;
+ struct cvmx_ciu_wdogx_s cn31xx;
+ struct cvmx_ciu_wdogx_s cn38xx;
+ struct cvmx_ciu_wdogx_s cn38xxp2;
+ struct cvmx_ciu_wdogx_s cn50xx;
+ struct cvmx_ciu_wdogx_s cn52xx;
+ struct cvmx_ciu_wdogx_s cn52xxp1;
+ struct cvmx_ciu_wdogx_s cn56xx;
+ struct cvmx_ciu_wdogx_s cn56xxp1;
+ struct cvmx_ciu_wdogx_s cn58xx;
+ struct cvmx_ciu_wdogx_s cn58xxp1;
+} cvmx_ciu_wdogx_t;
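+
+/*
+ * Usage sketch: programming a per-PP watchdog from the field
+ * descriptions above. LEN supplies the upper 16 bits of a 24-bit
+ * counter that decrements every 256 cycles, and MODE=1 requests
+ * interrupt-only operation. Assumes cvmx_write_csr() and a
+ * CVMX_CIU_WDOGX() address macro from the companion headers.
+ */
+static inline void cvmx_ciu_enable_watchdog(int core, uint16_t len)
+{
+    cvmx_ciu_wdogx_t wdog;
+    wdog.u64 = 0;
+    wdog.s.len = len;                /* must be set > 0 */
+    wdog.s.mode = 1;                 /* 1 = Interrupt Only */
+    cvmx_write_csr(CVMX_CIU_WDOGX(core), wdog.u64);
+}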
+
+
+/**
+ * cvmx_dbg_data
+ *
+ * DBG_DATA = Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dbg_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_23_63 : 41;
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dbg_data_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t pll_mul : 3; /**< pll_mul pins sampled at DCOK assertion */
+ uint64_t reserved_23_27 : 5;
+ uint64_t c_mul : 5; /**< Core PLL multiplier sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t reserved_23_27 : 5;
+ uint64_t pll_mul : 3;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn30xx;
+ struct cvmx_dbg_data_cn30xx cn31xx;
+ struct cvmx_dbg_data_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t d_mul : 4; /**< D_MUL pins sampled on DCOK assertion */
+ uint64_t dclk_mul2 : 1; /**< Should always be set for fast DDR-II operation */
+ uint64_t cclk_div2 : 1; /**< Should always be clear for fast core clock */
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t cclk_div2 : 1;
+ uint64_t dclk_mul2 : 1;
+ uint64_t d_mul : 4;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn38xx;
+ struct cvmx_dbg_data_cn38xx cn38xxp2;
+ struct cvmx_dbg_data_cn30xx cn50xx;
+ struct cvmx_dbg_data_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t rem : 6; /**< Remaining debug_select pins sampled at DCOK */
+ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
+ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
+ debug select value. */
+ uint64_t data : 17; /**< Value on the debug data lines. */
+#else
+ uint64_t data : 17;
+ uint64_t dsel_ext : 1;
+ uint64_t c_mul : 5;
+ uint64_t rem : 6;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn58xx;
+ struct cvmx_dbg_data_cn58xx cn58xxp1;
+} cvmx_dbg_data_t;
+
+
+/**
+ * cvmx_dfa_bst0
+ *
+ * DFA_BST0 = DFA Bist Status
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_bst0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t rdf : 16; /**< Bist Results for RDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t pdf : 16; /**< Bist Results for PDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdf : 16;
+ uint64_t rdf : 16;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_dfa_bst0_s cn31xx;
+ struct cvmx_dfa_bst0_s cn38xx;
+ struct cvmx_dfa_bst0_s cn38xxp2;
+ struct cvmx_dfa_bst0_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t rdf : 4; /**< Bist Results for RDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_4_15 : 12;
+ uint64_t pdf : 4; /**< Bist Results for PDF[3:0] RAM(s)
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t pdf : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t rdf : 4;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_dfa_bst0_cn58xx cn58xxp1;
+} cvmx_dfa_bst0_t;
+
+
+/**
+ * cvmx_dfa_bst1
+ *
+ * DFA_BST1 = DFA Bist Status
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_bst1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t drf : 1; /**< Bist Results for DRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t p1_brf : 8;
+ uint64_t p0_brf : 8;
+ uint64_t p1_bwb : 1;
+ uint64_t p0_bwb : 1;
+ uint64_t crf : 1;
+ uint64_t drf : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dfa_bst1_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t drf : 1; /**< Bist Results for DRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_0_17 : 18;
+#else
+ uint64_t reserved_0_17 : 18;
+ uint64_t crf : 1;
+ uint64_t drf : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_bst1_s cn38xx;
+ struct cvmx_dfa_bst1_s cn38xxp2;
+ struct cvmx_dfa_bst1_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_23_63 : 41;
+ uint64_t crq : 1; /**< Bist Results for CRQ RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t ifu : 1; /**< Bist Results for IFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t gfu : 1; /**< Bist Results for GFU RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t reserved_19_19 : 1;
+ uint64_t crf : 1; /**< Bist Results for CRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM
+ - 0: GOOD (or bist in progress/never run)
+ - 1: BAD */
+#else
+ uint64_t p1_brf : 8;
+ uint64_t p0_brf : 8;
+ uint64_t p1_bwb : 1;
+ uint64_t p0_bwb : 1;
+ uint64_t crf : 1;
+ uint64_t reserved_19_19 : 1;
+ uint64_t gfu : 1;
+ uint64_t ifu : 1;
+ uint64_t crq : 1;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } cn58xx;
+ struct cvmx_dfa_bst1_cn58xx cn58xxp1;
+} cvmx_dfa_bst1_t;
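+
+/*
+ * Usage sketch: interpreting the two BIST status registers above,
+ * where a set result bit means a bad RAM and 0 means good (or BIST
+ * still in progress/never run). Assumes cvmx_read_csr() and
+ * CVMX_DFA_BST0/CVMX_DFA_BST1 address constants from the companion
+ * headers.
+ */
+static inline int cvmx_dfa_bist_failed(void)
+{
+    cvmx_dfa_bst0_t bst0;
+    cvmx_dfa_bst1_t bst1;
+    bst0.u64 = cvmx_read_csr(CVMX_DFA_BST0);
+    bst1.u64 = cvmx_read_csr(CVMX_DFA_BST1);
+    /* Any RDF/PDF result bit, or any of BST1 bits <22:0>, means BAD. */
+    return bst0.s.rdf || bst0.s.pdf || (bst1.u64 & 0x7fffffull) != 0;
+}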
+
+
+/**
+ * cvmx_dfa_cfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFA_CFG = DFA Configuration
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t nrpl_ena : 1; /**< When set, allows the per-node replication feature to be
+ enabled.
+ In 36-bit mode: The IWORD0[31:30]=SNREPL field AND
+ bits [21:20] of the Next Node ptr are used in generating
+ the next node address (see OCTEON HRM - DFA Chapter for
+ pseudo-code of DTE next node address generation).
+ NOTE: When NRPL_ENA=1 and IWORD0[TY]=1(36b mode),
+ (regardless of IWORD0[NRPLEN]), the Resultant Word1+
+ [[47:44],[23:20]] = Next Node's [27:20] bits. This allows
+ SW to use the RESERVED bits of the final node for SW
+ caching. Also, if required, SW will use [22:21]=Node
+ Replication to re-start the same graph walk (if the
+ graph walk prematurely terminated, i.e. DATA_GONE).
+ In 18-bit mode: The IWORD0[31:30]=SNREPL field AND
+ bit [16:14] of the Next Node ptr are used in generating
+ the next node address (see OCTEON HRM - DFA Chapter for
+ pseudo-code of DTE next node address generation).
+ If (IWORD0[NREPLEN]=1 and DFA_CFG[NRPL_ENA]=1) [
+ If next node ptr[16] is set [
+ next node ptr[15:14] indicates the next node repl
+ next node ptr[13:0] indicates the position of the
+ node relative to the first normal node (i.e.
+ IWORD3[Msize] must be added to get the final node)
+ ]
+ else If next node ptr[16] is not set [
+ next node ptr[15:0] indicates the next node id
+ next node repl = 0
+ ]
+ ]
+ NOTE: For 18b node replication, MAX node space=64KB(2^16)
+ is used in detecting terminal node space(see HRM for full
+ description).
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-node" replication. */
+ uint64_t nxor_ena : 1; /**< When set, allows the DTE Instruction IWORD0[NXOREN]
+ to be used to enable/disable the per-node address 'scramble'
+ of the LLM address to lessen the effects of bank conflicts.
+ If IWORD0[NXOREN] is also set, then:
+ In 36-bit mode: The node_Id[7:0] 8-bit value is XORed
+ against the LLM address addr[9:2].
+ In 18-bit mode: The node_id[6:0] 7-bit value is XORed
+ against the LLM address addr[8:2]. (note: we don't address
+ scramble outside the mode's node space).
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-node" address scramble.
+ NOTE: The address 'scramble' occurs for BOTH DFA LLM graph
+ read/write operations. */
+ uint64_t gxor_ena : 1; /**< When set, the DTE Instruction IWORD0[GXOR]
+ field is used to 'scramble' the LLM address
+ to lessen the effects of bank conflicts.
+ In 36-bit mode: The GXOR[7:0] 8-bit value is XORed
+ against the LLM address addr[9:2].
+ In 18-bit mode: GXOR[6:0] 7-bit value is XORed against
+ the LLM address addr[8:2]. (note: we don't address
+ scramble outside the mode's node space)
+ NOTE: The DFA graphs MUST BE built/written to DFA LLM memory
+ aware of the "per-graph" address scramble.
+ NOTE: The address 'scramble' occurs for BOTH DFA LLM graph
+ read/write operations. */
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA
+ requests issued from either CP2 or the DTE (NCB-CSR
+ or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t sarb : 1;
+ uint64_t gxor_ena : 1;
+ uint64_t nxor_ena : 1;
+ uint64_t nrpl_ena : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_dfa_cfg_s cn38xx;
+ struct cvmx_dfa_cfg_cn38xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA
+ requests issued from either CP2 or the DTE (NCB-CSR
+ or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t sarb : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn38xxp2;
+ struct cvmx_dfa_cfg_s cn58xx;
+ struct cvmx_dfa_cfg_s cn58xxp1;
+} cvmx_dfa_cfg_t;
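+
+/*
+ * Sketch of the 36-bit-mode address scramble described in GXOR_ENA
+ * above: the 8-bit GXOR value is XORed against LLM address bits [9:2].
+ * Shown only to make the bit positions concrete; graphs written to
+ * DFA LLM memory must be built with the same scramble applied.
+ */
+static inline uint64_t cvmx_dfa_scramble_llm_addr36(uint64_t addr, uint8_t gxor)
+{
+    return addr ^ ((uint64_t)gxor << 2);   /* XOR into addr[9:2] */
+}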
+
+
+/**
+ * cvmx_dfa_dbell
+ *
+ * DFA_DBELL = DFA Doorbell Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DBELL register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b00.
+ * To read the DFA_DBELL register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b00.
+ *
+ * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DBELL register do not take effect.
+ * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DBELL register do not take effect.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_dbell_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell : 20; /**< Represents the cumulative total of pending
+ DFA instructions which SW has previously written
+ into the DFA Instruction FIFO (DIF) in main memory.
+ Each DFA instruction contains a fixed size 32B
+ instruction word which is executed by the DFA HW.
+ The DBL register can hold up to 1M-1 (2^20-1)
+ pending DFA instruction requests.
+ During a read (by SW), the 'most recent' contents
+ of the DFA_DBELL register are returned at the time
+ the NCB-INB bus is driven.
+ NOTE: Since DFA HW updates this register, its
+ contents are unpredictable in SW. */
+#else
+ uint64_t dbell : 20;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_dfa_dbell_s cn31xx;
+ struct cvmx_dfa_dbell_s cn38xx;
+ struct cvmx_dfa_dbell_s cn38xxp2;
+ struct cvmx_dfa_dbell_s cn58xx;
+ struct cvmx_dfa_dbell_s cn58xxp1;
+} cvmx_dfa_dbell_t;
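+
+/*
+ * Editor's sketch: ringing the doorbell after software has appended new
+ * 32B instructions to the DIF. Based on the "cumulative total" wording
+ * above, each write is assumed to add to the pending count (up to the
+ * 2^20-1 limit). cvmx_write_csr() and the CVMX_DFA_DBELL address
+ * constant are assumed to come from elsewhere in the SDK.
+ */
+static inline void cvmx_dfa_ring_doorbell_example(uint64_t num_instructions)
+{
+    cvmx_dfa_dbell_t dbell;
+
+    dbell.u64 = 0;
+    dbell.s.dbell = num_instructions;  /* #32B instructions just queued */
+    cvmx_write_csr(CVMX_DFA_DBELL, dbell.u64);
+}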
+
+
+/**
+ * cvmx_dfa_ddr2_addr
+ *
+ * DFA_DDR2_ADDR = DFA DDR2 fclk-domain Memory Address Config Register
+ *
+ *
+ * Description: The following registers are used to compose the DFA's DDR2 address into ROW/COL/BNK
+ * etc.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t rdimm_ena : 1; /**< If there is a need to insert a register chip on the
+ system (the equivalent of a registered DIMM) to
+                                                         provide better setup for the command and control bits,
+ turn this mode on.
+ RDIMM_ENA
+ 0 Registered Mode OFF
+ 1 Registered Mode ON */
+ uint64_t num_rnks : 2; /**< NUM_RNKS is programmed based on how many ranks there
+ are in the system. This needs to be programmed correctly
+ regardless of whether we are in RNK_LO mode or not.
+ NUM_RNKS \# of Ranks
+ 0 1
+ 1 2
+ 2 4
+ 3 RESERVED */
+ uint64_t rnk_lo : 1; /**< When this mode is turned on, consecutive addresses
+ outside the bank boundary
+ are programmed to go to different ranks in order to
+                                                         minimize bank conflicts. It is useful with memory built
+                                                         from 4-bank DDR2 parts, extending the \#physical banks
+                                                         available and minimizing bank conflicts.
+                                                         On 8-bank DDR2 parts this mode is less useful because
+                                                         it carries a penalty: every successive read that
+                                                         crosses a rank boundary needs a 1-cycle bubble
+                                                         inserted to prevent bus turnaround conflicts.
+ RNK_LO
+ 0 - OFF
+ 1 - ON */
+ uint64_t num_colrows : 3; /**< NUM_COLROWS is used to set the MSB of the ROW_ADDR
+ and the LSB of RANK address when not in RNK_LO mode.
+ Calculate the sum of \#COL and \#ROW and program the
+ controller appropriately
+ RANK_LSB \#COLs + \#ROWs
+ ------------------------------
+ - 000: 22
+ - 001: 23
+ - 010: 24
+ - 011: 25
+ - 100-111: RESERVED */
+ uint64_t num_cols : 2; /**< The Long word address that the controller receives
+ needs to be converted to Row, Col, Rank and Bank
+ addresses depending on the memory part's micro arch.
+                                                         NUM_COLS tells the controller how many column bits
+                                                         there are, and the controller uses this info to map
+                                                         the LSB of the row address
+ - 00: num_cols = 9
+ - 01: num_cols = 10
+ - 10: num_cols = 11
+ - 11: RESERVED */
+#else
+ uint64_t num_cols : 2;
+ uint64_t num_colrows : 3;
+ uint64_t rnk_lo : 1;
+ uint64_t num_rnks : 2;
+ uint64_t rdimm_ena : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_addr_s cn31xx;
+} cvmx_dfa_ddr2_addr_t;
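+
+/*
+ * Editor's sketch: deriving the NUM_COLS/NUM_COLROWS encodings above from
+ * a part's column/row bit counts. Per the field notes, NUM_COLS encodes
+ * 9/10/11 column bits as 0/1/2, and NUM_COLROWS encodes (#COLs + #ROWs)
+ * of 22..25 as 0..3. The helper itself is hypothetical; it returns -1
+ * for values the controller reserves.
+ */
+static inline int cvmx_dfa_ddr2_addr_encode(int col_bits, int row_bits,
+                                            cvmx_dfa_ddr2_addr_t *cfg)
+{
+    int colrows = col_bits + row_bits;
+
+    if (col_bits < 9 || col_bits > 11 || colrows < 22 || colrows > 25)
+        return -1;                        /* RESERVED encodings */
+    cfg->s.num_cols = col_bits - 9;       /* 9->0, 10->1, 11->2 */
+    cfg->s.num_colrows = colrows - 22;    /* 22->0 ... 25->3 */
+    return 0;
+}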
+
+
+/**
+ * cvmx_dfa_ddr2_bus
+ *
+ * DFA_DDR2_BUS = DFA DDR Bus Activity Counter
+ *
+ *
+ * Description: This counter counts \# cycles that the memory bus is doing a read/write/command
+ * Useful to benchmark the bus utilization as a ratio of
+ * \#Cycles of Data Transfer/\#Cycles since init or
+ * \#Cycles of Data Transfer/\#Cycles that memory controller is active
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_bus_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_47_63 : 17;
+ uint64_t bus_cnt : 47; /**< Counter counts the \# cycles of Data transfer */
+#else
+ uint64_t bus_cnt : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_bus_s cn31xx;
+} cvmx_dfa_ddr2_bus_t;
+
+
+/**
+ * cvmx_dfa_ddr2_cfg
+ *
+ * DFA_DDR2_CFG = DFA DDR2 fclk-domain Memory Configuration \#0 Register
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_41_63 : 23;
+ uint64_t trfc : 5; /**< Establishes tRFC(from DDR2 data sheets) in \# of
+ 4 fclk intervals.
+ General Equation:
+ TRFC(csr) = ROUNDUP[tRFC(data-sheet-ns)/(4 * fclk(ns))]
+ Example:
+ tRFC(data-sheet-ns) = 127.5ns
+ Operational Frequency: 533MHz DDR rate
+ [fclk=266MHz(3.75ns)]
+ Then:
+ TRFC(csr) = ROUNDUP[127.5ns/(4 * 3.75ns)]
+ = 9 */
+ uint64_t mrs_pgm : 1; /**< When clear, the HW initialization sequence fixes
+ some of the *MRS register bit definitions.
+ EMRS:
+ A[14:13] = 0 RESERVED
+ A[12] = 0 Output Buffers Enabled (FIXED)
+ A[11] = 0 RDQS Disabled (FIXED)
+ A[10] = 0 DQSn Enabled (FIXED)
+ A[9:7] = 0 OCD Not supported (FIXED)
+ A[6] = 0 RTT Disabled (FIXED)
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] (if DFA_DDR2_TMG[POCAS]=1)
+ Additive LATENCY (Programmable)
+ A[2]=0 RTT Disabled (FIXED)
+ A[1]=DFA_DDR2_TMG[DIC] (Programmable)
+ A[0] = 0 DLL Enabled (FIXED)
+ MRS:
+ A[14:13] = 0 RESERVED
+ A[12] = 0 Fast Active Power Down Mode (FIXED)
+ A[11:9] = DFA_DDR2_TMG[TWR](Programmable)
+ A[8] = 1 DLL Reset (FIXED)
+ A[7] = 0 Test Mode (FIXED)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (Programmable)
+ A[3] = 0 Burst Type(must be 0:Sequential) (FIXED)
+                                                         A[2:0] = 2 Burst Length=4 (FIXED)
+ When set, the HW initialization sequence sources
+ the DFA_DDR2_MRS, DFA_DDR2_EMRS registers which are
+ driven onto the DFA_A[] pins. (this allows the MRS/EMRS
+ fields to be completely programmable - however care
+ must be taken by software).
+ This mode is useful for customers who wish to:
+ 1) override the FIXED definitions(above), or
+ 2) Use a "clamshell mode" of operation where the
+ address bits(per rank) are swizzled on the
+ board to reduce stub lengths for optimal
+ frequency operation.
+ Use this in combination with DFA_DDR2_CFG[RNK_MSK]
+ to specify the INIT sequence for each of the 4
+ supported ranks. */
+ uint64_t fpip : 3; /**< Early Fill Programmable Pipe [\#fclks]
+ This field dictates the \#fclks prior to the arrival
+ of fill data(in fclk domain), to start the 'early' fill
+ command pipe (in the eclk domain) so as to minimize the
+ overall fill latency.
+ The programmable early fill command signal is synchronized
+ into the eclk domain, where it is used to pull data out of
+ asynchronous RAM as fast as possible.
+ NOTE: A value of FPIP=0 is the 'safest' setting and will
+ result in the early fill command pipe starting in the
+ same cycle as the fill data.
+ General Equation: (for FPIP)
+ FPIP <= MIN[6, (ROUND_DOWN[6/EF_RATIO] + 1)]
+ where:
+ EF_RATIO = ECLK/FCLK Ratio [eclk(MHz)/fclk(MHz)]
+ Example: FCLK=200MHz/ECLK=600MHz
+                                                          FPIP = MIN[6, (ROUND_DOWN[6/(600/200)] + 1)]
+ FPIP <= 3 */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ref_int : 13; /**< Refresh Interval (represented in \#of fclk
+ increments).
+ Each refresh interval will generate a single
+ auto-refresh command sequence which implicitly targets
+ all banks within the device:
+ Example: For fclk=200MHz(5ns)/400MHz(DDR):
+                                                         trefint(ns) = tREFI(max) = 3.9us = 3900ns [datasheet]
+ REF_INT = ROUND_DOWN[(trefint/fclk)]
+ = ROUND_DOWN[(3900ns/5ns)]
+ = 780 fclks (0x30c)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t tskw : 2; /**< Board Skew (represented in \#fclks)
+ Represents additional board skew of DQ/DQS.
+ - 00: board-skew = 0 fclk
+ - 01: board-skew = 1 fclk
+ - 10: board-skew = 2 fclk
+ - 11: board-skew = 3 fclk
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t rnk_msk : 4; /**< Controls the CS_N[3:0] during a) a HW Initialization
+ sequence (triggered by DFA_DDR2_CFG[INIT]) or
+ b) during a normal refresh sequence. If
+ the RNK_MSK[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per rank(or clam). In a clamshell configuration,
+ the N3K DFA_A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+                                                         separate HW init sequences for each unique rank address
+ mapping. Before each HW init sequence is triggered,
+ SW must preload the DFA_DDR2_MRS/EMRS registers with
+ the data that will be driven onto the A[14:0] wires
+ during the EMRS/MRS mode register write(s).
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+                                                         the RNK_MSK[3:0] field = 4'b1111 (so that CS_N[3:0]
+                                                         is driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t silo_qc : 1; /**< Enables Quarter Cycle move of the Rd sampling window */
+ uint64_t silo_hc : 1; /**< A combination of SILO_HC, SILO_QC and TSKW
+ specifies the positioning of the sampling strobe
+ when receiving read data back from DDR2. This is
+ done to offset any board trace induced delay on
+ the DQ and DQS which inherently makes these
+ asynchronous with respect to the internal clk of
+ controller. TSKW moves this sampling window by
+ integer cycles. SILO_QC and HC move this quarter
+ and half a cycle respectively. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#fclks): On reads, determines how many
+ additional fclks to wait (on top of CASLAT+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t bprch : 1; /**< Tristate Enable (back porch) (\#fclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 1; /**< Tristate Enable (front porch) (\#fclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t init : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for the LLM Memory Port is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port
+ a) PRTENA=1
+ 2) Wait 200us (to ensure a stable clock
+ to the DDR2) - as per DDR2 spec.
+ 3) Write a '1' to the INIT which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_DDR2* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t prtena : 1; /**< Enable DFA Memory
+ When enabled, this bit lets N3K be the default
+ driver for DFA-LLM memory port. */
+#else
+ uint64_t prtena : 1;
+ uint64_t init : 1;
+ uint64_t fprch : 1;
+ uint64_t bprch : 1;
+ uint64_t sil_lat : 2;
+ uint64_t silo_hc : 1;
+ uint64_t silo_qc : 1;
+ uint64_t rnk_msk : 4;
+ uint64_t tskw : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t ref_int : 13;
+ uint64_t reserved_29_31 : 3;
+ uint64_t fpip : 3;
+ uint64_t mrs_pgm : 1;
+ uint64_t trfc : 5;
+ uint64_t reserved_41_63 : 23;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_cfg_s cn31xx;
+} cvmx_dfa_ddr2_cfg_t;
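+
+/*
+ * Editor's sketch: computing the TRFC and REF_INT encodings from datasheet
+ * values, following the equations in the field notes above
+ * (TRFC = ROUNDUP[tRFC(ns)/(4*fclk(ns))], REF_INT = ROUND_DOWN[tREFI/fclk]).
+ * Picosecond integer math avoids floating point; the helper is
+ * hypothetical. tRFC=127500ps at fclk=3750ps gives TRFC=9, and
+ * tREFI=3900000ps at fclk=5000ps gives REF_INT=780, matching the worked
+ * examples above.
+ */
+static inline void cvmx_dfa_ddr2_cfg_refresh_timing(uint64_t trfc_ps,
+                                                    uint64_t trefi_ps,
+                                                    uint64_t fclk_ps,
+                                                    cvmx_dfa_ddr2_cfg_t *cfg)
+{
+    cfg->s.trfc = (trfc_ps + 4 * fclk_ps - 1) / (4 * fclk_ps); /* round up */
+    cfg->s.ref_int = trefi_ps / fclk_ps;                       /* round down */
+}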
+
+
+/**
+ * cvmx_dfa_ddr2_comp
+ *
+ * DFA_DDR2_COMP = DFA DDR2 I/O PVT Compensation Configuration
+ *
+ *
+ * Description: The following are registers to program the DDR2 PLL and DLL
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_comp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t dfa__pctl : 4; /**< DFA DDR pctl from compensation circuit
+ Internal DBG only */
+ uint64_t dfa__nctl : 4; /**< DFA DDR nctl from compensation circuit
+ Internal DBG only */
+ uint64_t reserved_9_55 : 47;
+ uint64_t pctl_csr : 4; /**< Compensation control bits */
+ uint64_t nctl_csr : 4; /**< Compensation control bits */
+ uint64_t comp_bypass : 1; /**< Compensation Bypass */
+#else
+ uint64_t comp_bypass : 1;
+ uint64_t nctl_csr : 4;
+ uint64_t pctl_csr : 4;
+ uint64_t reserved_9_55 : 47;
+ uint64_t dfa__nctl : 4;
+ uint64_t dfa__pctl : 4;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_comp_s cn31xx;
+} cvmx_dfa_ddr2_comp_t;
+
+
+/**
+ * cvmx_dfa_ddr2_emrs
+ *
+ * DFA_DDR2_EMRS = DDR2 EMRS Register(s) EMRS1[14:0], EMRS1_OCD[14:0]
+ * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT
+ * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam (or rank)), a HW init
+ * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these
+ * registers during the HW initialization sequence (see N3K specific restrictions in notes below).
+ * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature.
+ *
+ * Notes:
+ * For DDR-II please consult your device's data sheet for further details.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_emrs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t emrs1_ocd : 15; /**< Memory Address[14:0] during "EMRS1 (OCD Calibration)"
+ step \#12a "EMRS OCD Default Command" A[9:7]=111
+ of DDR2 HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2):
+ Power Up and initialization sequence).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Output Buffers Enabled
+ A[11] = 0, RDQS Disabled (we do not support RDQS)
+ A[10] = 0, DQSn Enabled
+ A[9:7] = 7, OCD Calibration Mode Default
+ A[6] = 0, ODT Disabled
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0)
+ A[2]=0 Termination Res RTT (ODT off Default)
+ [A6,A2] = 0 -> ODT Disabled
+ 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved
+ A[1]=0 Normal Output Driver Imp mode
+                                                         (1 = weak, i.e., 60% of normal drive strength)
+ A[0] = 0 DLL Enabled */
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs1 : 15; /**< Memory Address[14:0] during:
+ a) Step \#7 "EMRS1 to enable DLL (A[0]=0)"
+ b) Step \#12b "EMRS OCD Calibration Mode Exit"
+ steps of DDR2 HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up and
+ initialization sequence).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Output Buffers Enabled
+ A[11] = 0, RDQS Disabled (we do not support RDQS)
+ A[10] = 0, DQSn Enabled
+ A[9:7] = 0, OCD Calibration Mode exit/maintain
+ A[6] = 0, ODT Disabled
+ A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0)
+ A[2]=0 Termination Res RTT (ODT off Default)
+ [A6,A2] = 0 -> ODT Disabled
+ 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved
+ A[1]=0 Normal Output Driver Imp mode
+                                                         (1 = weak, i.e., 60% of normal drive strength)
+ A[0] = 0 DLL Enabled */
+#else
+ uint64_t emrs1 : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs1_ocd : 15;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_emrs_s cn31xx;
+} cvmx_dfa_ddr2_emrs_t;
+
+
+/**
+ * cvmx_dfa_ddr2_fcnt
+ *
+ * DFA_DDR2_FCNT = DFA FCLK Counter
+ *
+ *
+ * Description: This FCLK cycle counter gets going after memory has been initialized
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_fcnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_47_63 : 17;
+ uint64_t fcyc_cnt : 47; /**< Counter counts FCLK cycles or \# cycles that the memory
+ controller has requests queued up depending on FCNT_MODE
+ If FCNT_MODE = 0, this counter counts the \# FCLK cycles
+ If FCNT_MODE = 1, this counter counts the \# cycles the
+ controller is active with memory requests. */
+#else
+ uint64_t fcyc_cnt : 47;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_fcnt_s cn31xx;
+} cvmx_dfa_ddr2_fcnt_t;
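+
+/*
+ * Editor's sketch: the bus-utilization ratio described in the
+ * DFA_DDR2_BUS/DFA_DDR2_FCNT notes, i.e. #cycles of data transfer divided
+ * by the #cycles accumulated by FCNT (total fclks, or controller-active
+ * fclks, depending on FCNT_MODE). Returns tenths of a percent to stay in
+ * integer math. cvmx_read_csr() and the CVMX_DFA_DDR2_BUS/
+ * CVMX_DFA_DDR2_FCNT address constants are assumed from the SDK.
+ */
+static inline uint64_t cvmx_dfa_ddr2_utilization_x1000(void)
+{
+    cvmx_dfa_ddr2_bus_t bus;
+    cvmx_dfa_ddr2_fcnt_t fcnt;
+
+    bus.u64 = cvmx_read_csr(CVMX_DFA_DDR2_BUS);
+    fcnt.u64 = cvmx_read_csr(CVMX_DFA_DDR2_FCNT);
+    if (fcnt.s.fcyc_cnt == 0)
+        return 0;                    /* counters not running yet */
+    return (bus.s.bus_cnt * 1000ull) / fcnt.s.fcyc_cnt;
+}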
+
+
+/**
+ * cvmx_dfa_ddr2_mrs
+ *
+ * DFA_DDR2_MRS = DDR2 MRS Register(s) MRS_DLL[14:0], MRS[14:0]
+ * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT
+ * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam (or rank)), a HW init
+ * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these
+ * registers during the HW initialization sequence (see N3K specific restrictions in notes below).
+ * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature.
+ *
+ * Notes:
+ * For DDR-II please consult your device's data sheet for further details.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_mrs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t mrs : 15; /**< Memory Address[14:0] during "MRS without resetting
+ DLL A[8]=0" step of HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up
+ and initialization sequence - Step \#11).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Fast Active Power Down Mode
+ A[11:9] = DFA_DDR2_TMG[TWR]
+ A[8] = 0, for DLL Reset
+ A[7] =0 Test Mode (must be 0 for normal operation)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4)
+ A[3]=0 Burst Type(must be 0:Sequential)
+ A[2:0]=2 Burst Length=4(default) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs_dll : 15; /**< Memory Address[14:0] during "MRS for DLL_RESET A[8]=1"
+ step of HW initialization sequence.
+ (See JEDEC DDR2 specification (JESD79-2): Power Up
+ and initialization sequence - Step \#8).
+ A[14:13] = 0, RESERVED
+ A[12] = 0, Fast Active Power Down Mode
+ A[11:9] = DFA_DDR2_TMG[TWR]
+ A[8] = 1, for DLL Reset
+ A[7] = 0 Test Mode (must be 0 for normal operation)
+ A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4)
+ A[3] = 0 Burst Type(must be 0:Sequential)
+ A[2:0] = 2 Burst Length=4(default) */
+#else
+ uint64_t mrs_dll : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs : 15;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_mrs_s cn31xx;
+} cvmx_dfa_ddr2_mrs_t;
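+
+/*
+ * Editor's sketch of the per-rank "clamshell" flow described in the
+ * DFA_DDR2_CFG[RNK_MSK]/[MRS_PGM] and DFA_DDR2_MRS/EMRS notes: preload
+ * the (possibly swizzled) MRS/EMRS images for one rank, select only that
+ * rank's CS_N, then pulse INIT 0->1. cvmx_read_csr()/cvmx_write_csr()
+ * and the CVMX_DFA_DDR2_* address constants are assumed from the SDK;
+ * the images themselves are board-specific placeholders.
+ */
+static inline void cvmx_dfa_ddr2_init_rank_example(int rank,
+                                                   uint64_t mrs_image,
+                                                   uint64_t emrs_image)
+{
+    cvmx_dfa_ddr2_cfg_t cfg;
+
+    cvmx_write_csr(CVMX_DFA_DDR2_MRS, mrs_image);    /* A[14:0] per rank */
+    cvmx_write_csr(CVMX_DFA_DDR2_EMRS, emrs_image);
+
+    cfg.u64 = cvmx_read_csr(CVMX_DFA_DDR2_CFG);
+    cfg.s.mrs_pgm = 1;          /* source MRS/EMRS from the CSRs above */
+    cfg.s.rnk_msk = 1 << rank;  /* drive only this rank's CS_N */
+    cfg.s.init = 0;
+    cvmx_write_csr(CVMX_DFA_DDR2_CFG, cfg.u64);
+    cfg.s.init = 1;             /* 0->1 transition triggers the HW init */
+    cvmx_write_csr(CVMX_DFA_DDR2_CFG, cfg.u64);
+    /* Caller must wait 64K eclks before touching any DFA_DDR2* register,
+     * and finally set RNK_MSK=0xF so refreshes reach all ranks. */
+}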
+
+
+/**
+ * cvmx_dfa_ddr2_opt
+ *
+ * DFA_DDR2_OPT = DFA DDR2 Optimization Registers
+ *
+ *
+ * Description: The following are registers to tweak certain parameters to boost performance
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_opt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t max_read_batch : 5; /**< Maximum number of consecutive read to service before
+ allowing write to interrupt. */
+ uint64_t max_write_batch : 5; /**< Maximum number of consecutive writes to service before
+ allowing reads to interrupt. */
+#else
+ uint64_t max_write_batch : 5;
+ uint64_t max_read_batch : 5;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_opt_s cn31xx;
+} cvmx_dfa_ddr2_opt_t;
+
+
+/**
+ * cvmx_dfa_ddr2_pll
+ *
+ * DFA_DDR2_PLL = DFA DDR2 PLL and DLL Configuration
+ *
+ *
+ * Description: The following are registers to program the DDR2 PLL and DLL
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_pll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t pll_setting : 17; /**< Internal Debug Use Only */
+ uint64_t reserved_32_46 : 15;
+ uint64_t setting90 : 5; /**< Contains the setting of DDR DLL; Internal DBG only */
+ uint64_t reserved_21_26 : 6;
+ uint64_t dll_setting : 5; /**< Contains the open loop setting value for the DDR90 delay
+ line. */
+ uint64_t dll_byp : 1; /**< DLL Bypass. When set, the DDR90 DLL is bypassed and
+ the DLL behaves in Open Loop giving a fixed delay
+ set by DLL_SETTING */
+ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
+                                                         erst deassertion will reset the DDR90 DLL. Allow
+                                                         200 microseconds for Lock before DDR Init. */
+ uint64_t bw_ctl : 4; /**< Internal Use Only - for Debug */
+ uint64_t bw_upd : 1; /**< Internal Use Only - for Debug */
+ uint64_t pll_div2 : 1; /**< PLL Output is further divided by 2. Useful for slow
+ fclk frequencies where the PLL may be out of range. */
+ uint64_t reserved_7_7 : 1;
+      uint64_t pll_ratio                : 5;        /**< Bits <6:2> set the clk multiplication ratio
+ If the fclk frequency desired is less than 260MHz
+ (lower end saturation point of the pll), write 2x
+ the ratio desired in this register and set PLL_DIV2 */
+ uint64_t pll_bypass : 1; /**< PLL Bypass. Uses the ref_clk without multiplication. */
+ uint64_t pll_init : 1; /**< Need a 0 to 1 pulse on this CSR to get the DFA
+ Clk Generator Started. Write this register before
+                                                         starting anything. Allow 200 us for PLL Lock before
+ doing anything. */
+#else
+ uint64_t pll_init : 1;
+ uint64_t pll_bypass : 1;
+ uint64_t pll_ratio : 5;
+ uint64_t reserved_7_7 : 1;
+ uint64_t pll_div2 : 1;
+ uint64_t bw_upd : 1;
+ uint64_t bw_ctl : 4;
+ uint64_t qdll_ena : 1;
+ uint64_t dll_byp : 1;
+ uint64_t dll_setting : 5;
+ uint64_t reserved_21_26 : 6;
+ uint64_t setting90 : 5;
+ uint64_t reserved_32_46 : 15;
+ uint64_t pll_setting : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_pll_s cn31xx;
+} cvmx_dfa_ddr2_pll_t;
+
+
+/**
+ * cvmx_dfa_ddr2_tmg
+ *
+ * DFA_DDR2_TMG = DFA DDR2 Memory Timing Config Register
+ *
+ *
+ * Description: The following are registers to program the DDR2 memory timing parameters.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ddr2_tmg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_47_63 : 17;
+ uint64_t fcnt_mode : 1; /**< If FCNT_MODE = 0, this counter counts the \# FCLK cycles
+ If FCNT_MODE = 1, this counter counts the \# cycles the
+ controller is active with memory requests. */
+ uint64_t cnt_clr : 1; /**< Clears the FCLK Cyc & Bus Util counter */
+ uint64_t cavmipo : 1; /**< RESERVED */
+ uint64_t ctr_rst : 1; /**< Reset oneshot pulse for refresh counter & Perf counters
+ SW should first write this field to a one to clear
+ & then write to a zero for normal operation */
+ uint64_t odt_rtt : 2; /**< DDR2 Termination Resistor Setting
+ These two bits are loaded into the RTT
+ portion of the EMRS register bits A6 & A2. If DDR2's
+ termination (for the memory's DQ/DQS/DM pads) is not
+                                                         desired, set it to 00. If it is, choose between
+ 01 for 75 ohm and 10 for 150 ohm termination.
+ 00 = ODT Disabled
+ 01 = 75 ohm Termination
+ 10 = 150 ohm Termination
+ 11 = 50 ohm Termination */
+ uint64_t dqsn_ena : 1; /**< For DDR-II Mode, DIC[1] is used to load into EMRS
+ bit 10 - DQSN Enable/Disable field. By default, we
+ program the DDR's to drive the DQSN also. Set it to
+ 1 if DQSN should be Hi-Z.
+ 0 - DQSN Enable
+ 1 - DQSN Disable */
+ uint64_t dic : 1; /**< Drive Strength Control:
+ For DDR-I/II Mode, DIC[0] is
+ loaded into the Extended Mode Register (EMRS) A1 bit
+ during initialization. (see DDR-I data sheet EMRS
+ description)
+ 0 = Normal
+ 1 = Reduced */
+ uint64_t r2r_slot : 1; /**< A 1 on this register will force the controller to
+                                                         slot a bubble between consecutive reads */
+ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
+ Four Access Window time. Relevant only in
+ 8-bank parts.
+ TFAW = 5'b0 for DDR2-4bank
+ TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */
+ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
+ Last Wr Data to Rd Command time.
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED */
+ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Prech
+ This is not a direct encoding of the value. Its
+ programmed as below per DDR2 spec. The decimal number
+                                                         on the right is RNDUP[tWR(ns)/tcyc(ns)]
+ TYP=15ns
+ - 000: RESERVED
+ - 001: 2
+ - 010: 3
+ - 011: 4
+ - 100: 5
+ - 101: 6
+ - 110-111: RESERVED */
+ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 1
+ - ...
+ - 0111: 7
+ - 1000-1111: RESERVED
+ When using parts with 8 banks (DFA_CFG->MAX_BNK
+ is 1), load tRP cycles + 1 into this register. */
+ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=45ns
+ - 00000-0001: RESERVED
+ - 00010: 2
+ - ...
+ - 10100: 20
+ - 10101-11111: RESERVED */
+ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different
+ banks. (Represented in fclk cycles)
+ For DDR2, TYP=7.5ns
+ - 000: RESERVED
+ - 001: 1 tCYC
+ - 010: 2 tCYC
+ - 011: 3 tCYC
+ - 100: 4 tCYC
+ - 101: 5 tCYC
+ - 110-111: RESERVED */
+ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
+ (Represented in fclk cycles)
+ TYP=15ns
+ - 0000: RESERVED
+ - 0001: 2 (2 is the smallest value allowed)
+                                                         - 0010: 2
+                                                         - ...
+                                                         - 0111: 7
+                                                         - 1000-1111: RESERVED
+ uint64_t addlat : 3; /**< When in Posted CAS mode ADDLAT needs to be programmed
+ to tRCD-1
+ ADDLAT \#additional latency cycles
+ 000 0
+ 001 1 (tRCD = 2 fclk's)
+ 010 2 (tRCD = 3 fclk's)
+ 011 3 (tRCD = 4 fclk's)
+ 100 4 (tRCD = 5 fclk's)
+ 101 5 (tRCD = 6 fclk's)
+ 110 6 (tRCD = 7 fclk's)
+ 111 7 (tRCD = 8 fclk's) */
+ uint64_t pocas : 1; /**< Posted CAS mode. When 1, we use DDR2's Posted CAS
+ feature. When using this mode, ADDLAT needs to be
+ programmed as well */
+ uint64_t caslat : 3; /**< CAS Latency in \# fclk Cycles
+ CASLAT \# CAS latency cycles
+ 000 - 010 RESERVED
+ 011 3
+ 100 4
+ 101 5
+ 110 6
+ 111 7 */
+ uint64_t tmrd : 2; /**< tMRD Cycles
+ (Represented in fclk tCYC)
+                                                         For DDR2, TYP = 2*tCYC
+                                                         - 00: RESERVED
+                                                         - 01: 1
+                                                         - 10: 2
+                                                         - 11: 3 */
+ uint64_t ddr2t : 1; /**< When 2T mode is turned on, command signals are
+ setup a cycle ahead of when the CS is enabled
+ and kept for a total of 2 cycles. This mode is
+ enabled in higher speeds when there is difficulty
+ meeting setup. Performance could
+ be negatively affected in 2T mode */
+#else
+ uint64_t ddr2t : 1;
+ uint64_t tmrd : 2;
+ uint64_t caslat : 3;
+ uint64_t pocas : 1;
+ uint64_t addlat : 3;
+ uint64_t trcd : 4;
+ uint64_t trrd : 3;
+ uint64_t tras : 5;
+ uint64_t trp : 4;
+ uint64_t twr : 3;
+ uint64_t twtr : 4;
+ uint64_t tfaw : 5;
+ uint64_t r2r_slot : 1;
+ uint64_t dic : 1;
+ uint64_t dqsn_ena : 1;
+ uint64_t odt_rtt : 2;
+ uint64_t ctr_rst : 1;
+ uint64_t cavmipo : 1;
+ uint64_t cnt_clr : 1;
+ uint64_t fcnt_mode : 1;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_ddr2_tmg_s cn31xx;
+} cvmx_dfa_ddr2_tmg_t;
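+
+/*
+ * Editor's sketch: converting datasheet nanosecond parameters into the
+ * cycle counts used by the fields above, per the RNDUP[t(ns)/tcyc(ns)]
+ * equations in the notes (tRP/tRAS/tRCD/tWTR all follow this pattern).
+ * Picosecond integer math; the helper is hypothetical. Example:
+ * tRP=15000ps at fclk=5000ps (200MHz) gives 3 cycles.
+ */
+static inline uint64_t cvmx_dfa_ddr2_ns_to_cycles(uint64_t t_ps,
+                                                  uint64_t tcyc_ps)
+{
+    return (t_ps + tcyc_ps - 1) / tcyc_ps;  /* RNDUP */
+}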
+
+
+/**
+ * cvmx_dfa_difctl
+ *
+ * DFA_DIFCTL = DFA Instruction FIFO (DIF) Control Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DIFCTL register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b11.
+ * To read the DFA_DIFCTL register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b11.
+ *
+ * NOTE: This register is intended to ONLY be written once (at power-up). Any future writes could
+ * cause the DFA and FPA HW to become unpredictable.
+ *
+ * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFCTL register do not take effect.
+ * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DIFCTL register do not take effect.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_difctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t dwbcnt : 8; /**< Represents the \# of cache lines in the instruction
+ buffer that may be dirty and should not be
+ written-back to memory when the instruction
+ chunk is returned to the Free Page list.
+ NOTE: Typically SW will want to mark all DFA
+ Instruction memory returned to the Free Page list
+ as DWB (Don't WriteBack), therefore SW should
+ seed this register as:
+ DFA_DIFCTL[DWBCNT] = (DFA_DIFCTL[SIZE] + 4)/4 */
+ uint64_t pool : 3; /**< Represents the 3bit buffer pool-id used by DFA HW
+ when the DFA instruction chunk is recycled back
+ to the Free Page List maintained by the FPA HW
+ (once the DFA instruction has been issued). */
+ uint64_t size : 9; /**< Represents the \# of 32B instructions contained
+ within each DFA instruction chunk. At Power-on,
+ SW will seed the SIZE register with a fixed
+ chunk-size. (Must be at least 3)
+ DFA HW uses this field to determine the size
+ of each DFA instruction chunk, in order to:
+ a) determine when to read the next DFA
+ instruction chunk pointer which is
+ written by SW at the end of the current
+ DFA instruction chunk (see DFA description
+ of next chunk buffer Ptr for format).
+ b) determine when a DFA instruction chunk
+ can be returned to the Free Page List
+ maintained by the FPA HW. */
+#else
+ uint64_t size : 9;
+ uint64_t pool : 3;
+ uint64_t dwbcnt : 8;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_dfa_difctl_s cn31xx;
+ struct cvmx_dfa_difctl_s cn38xx;
+ struct cvmx_dfa_difctl_s cn38xxp2;
+ struct cvmx_dfa_difctl_s cn58xx;
+ struct cvmx_dfa_difctl_s cn58xxp1;
+} cvmx_dfa_difctl_t;
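+
+/*
+ * Editor's sketch: the one-time power-up seeding of DFA_DIFCTL, using the
+ * DWBCNT relation from the notes above (DWBCNT = (SIZE + 4)/4 marks the
+ * whole chunk as Don't-WriteBack). cvmx_write_csr() and the
+ * CVMX_DFA_DIFCTL address constant are assumed from the SDK.
+ */
+static inline void cvmx_dfa_difctl_seed_example(unsigned chunk_size,
+                                                unsigned fpa_pool)
+{
+    cvmx_dfa_difctl_t ctl;
+
+    ctl.u64 = 0;
+    ctl.s.size = chunk_size;              /* #32B instructions/chunk, >= 3 */
+    ctl.s.pool = fpa_pool;                /* FPA pool the chunks return to */
+    ctl.s.dwbcnt = (chunk_size + 4) / 4;  /* per the DWBCNT note above */
+    cvmx_write_csr(CVMX_DFA_DIFCTL, ctl.u64);
+}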
+
+
+/**
+ * cvmx_dfa_difrdptr
+ *
+ * DFA_DIFRDPTR = DFA Instruction FIFO (DIF) RDPTR Register
+ *
+ * Description:
+ * NOTE: To write to the DFA_DIFRDPTR register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b01.
+ * To read the DFA_DIFRDPTR register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b01.
+ *
+ * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFRDPTR register do not take effect.
+ * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DIFRDPTR register do not take effect.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_difrdptr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_36_63 : 28;
+ uint64_t rdptr : 31; /**< Represents the 32B-aligned address of the current
+ instruction in the DFA Instruction FIFO in main
+ memory. The RDPTR must be seeded by software at
+ boot time, and is then maintained thereafter
+ by DFA HW.
+ During the seed write (by SW), RDPTR[6:5]=0,
+ since DFA instruction chunks must be 128B aligned.
+ During a read (by SW), the 'most recent' contents
+ of the RDPTR register are returned at the time
+ the NCB-INB bus is driven.
+ NOTE: Since DFA HW updates this register, its
+ contents are unpredictable in SW (unless
+ its guaranteed that no new DoorBell register
+ writes have occurred and the DoorBell register is
+ read as zero). */
+ uint64_t reserved_0_4 : 5;
+#else
+ uint64_t reserved_0_4 : 5;
+ uint64_t rdptr : 31;
+ uint64_t reserved_36_63 : 28;
+#endif
+ } s;
+ struct cvmx_dfa_difrdptr_s cn31xx;
+ struct cvmx_dfa_difrdptr_s cn38xx;
+ struct cvmx_dfa_difrdptr_s cn38xxp2;
+ struct cvmx_dfa_difrdptr_s cn58xx;
+ struct cvmx_dfa_difrdptr_s cn58xxp1;
+} cvmx_dfa_difrdptr_t;
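+
+/*
+ * Editor's sketch: boot-time seeding of DFA_DIFRDPTR with the physical
+ * address of the first instruction chunk. Chunks must be 128B aligned,
+ * so address bits [6:0] are cleared before placing bits [35:5] into the
+ * RDPTR field. cvmx_write_csr() and the CVMX_DFA_DIFRDPTR address
+ * constant are assumed from the SDK.
+ */
+static inline void cvmx_dfa_difrdptr_seed_example(uint64_t chunk_paddr)
+{
+    cvmx_dfa_difrdptr_t rdptr;
+
+    rdptr.u64 = 0;
+    rdptr.s.rdptr = (chunk_paddr & ~0x7full) >> 5;  /* 32B units */
+    cvmx_write_csr(CVMX_DFA_DIFRDPTR, rdptr.u64);
+}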
+
+
+/**
+ * cvmx_dfa_eclkcfg
+ *
+ * Specify the RSL base addresses for the block
+ *
+ * DFA_ECLKCFG = DFA eclk-domain Configuration Registers
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_eclkcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t sbdnum : 3; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 8 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t reserved_15_15 : 1;
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+                                                         (0=Fixed Priority [LP=WQF,DFF, HP=RGF], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+                                                         (0=Fixed Priority [LP=IRF,RWF,PRF, HP=GRF], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t sarb : 1; /**< DFA Source Arbiter Mode
+ Selects the arbitration mode used to select DFA requests
+ issued from either CP2 or the DTE (NCB-CSR or DFA HW engine).
+ - 0: Fixed Priority [Highest=CP2, Lowest=DTE]
+ - 1: Round-Robin
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled.
+ NOTE: When SET, SW MUST NEVER issue ANY operations to
+ the DFA via the NCB Bus. All DFA Operations must be
+ issued solely through the CP2 interface. */
+ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper
+ when extracting address bits for the memory bank#.
+                                                         when extracting address bits for the memory bank#).
+ - 1: 8 banks/device */
+ uint64_t dfa_frstn : 1; /**< Hold this 0 until the DFA DDR PLL and DLL lock
+ and then write a 1. A 1 on this register deasserts
+ the internal frst_n. Refer to DFA_DDR2_PLL registers for more
+ startup information.
+ Startup sequence if DFA interface needs to be ON:
+ After valid power up,
+ Write DFA_DDR2_PLL-> PLL_RATIO & PLL_DIV2 & PLL_BYPASS
+ to the appropriate values
+ Wait a few cycles
+ Write a 1 DFA_DDR2_PLL -> PLL_INIT
+ Wait 100 microseconds
+ Write a 1 to DFA_DDR2_PLL -> QDLL_ENA
+ Wait 10 microseconds
+ Write a 1 to this register DFA_FRSTN to pull DFA out of
+ reset
+ Now the DFA block is ready to be initialized (follow the
+ DDR init sequence). */
+#else
+ uint64_t dfa_frstn : 1;
+ uint64_t maxbnk : 1;
+ uint64_t dteclkdis : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t sarb : 1;
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t sbdnum : 3;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_dfa_eclkcfg_s cn31xx;
+} cvmx_dfa_eclkcfg_t;
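+
+/*
+ * Editor's sketch of the DFA_FRSTN startup sequence listed above: program
+ * the PLL ratio/divider, pulse PLL_INIT, enable the quad DLL, then
+ * release the internal frst_n. cvmx_read_csr()/cvmx_write_csr(),
+ * cvmx_wait_usec() and the CVMX_DFA_DDR2_PLL/CVMX_DFA_ECLKCFG address
+ * constants are assumed from the SDK; the PLL settings are board-specific
+ * placeholders.
+ */
+static inline void cvmx_dfa_ddr2_startup_example(unsigned pll_ratio,
+                                                 unsigned pll_div2)
+{
+    cvmx_dfa_ddr2_pll_t pll;
+    cvmx_dfa_eclkcfg_t eclk;
+
+    pll.u64 = cvmx_read_csr(CVMX_DFA_DDR2_PLL);
+    pll.s.pll_ratio = pll_ratio;
+    pll.s.pll_div2 = pll_div2;
+    pll.s.pll_bypass = 0;
+    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);
+
+    pll.s.pll_init = 1;              /* 0->1 pulse starts the clock gen */
+    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);
+    cvmx_wait_usec(100);             /* wait for PLL lock */
+
+    pll.s.qdll_ena = 1;              /* 0->1 resets the DDR90 DLL */
+    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);
+    cvmx_wait_usec(10);              /* wait for DLL lock */
+
+    eclk.u64 = cvmx_read_csr(CVMX_DFA_ECLKCFG);
+    eclk.s.dfa_frstn = 1;            /* deassert the internal frst_n */
+    cvmx_write_csr(CVMX_DFA_ECLKCFG, eclk.u64);
+    /* The DDR init sequence (DFA_DDR2_CFG[INIT], etc.) may follow. */
+}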
+
+
+/**
+ * cvmx_dfa_err
+ *
+ * DFA_ERR = DFA ERROR Register
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_33_63 : 31;
+ uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit.
+ When set, doorbell overflow conditions are reported. */
+ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit
+ When set, the 20b accumulated doorbell register
+ had overflowed (SW wrote too many doorbell requests).
+ If the DBLINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ NOTE: Detection of a Doorbell Register overflow
+ is a catastrophic error which may leave the DFA
+ HW in an unrecoverable state. */
+ uint64_t cp2pina : 1; /**< CP2 LW Mode Parity Error Interrupt Enable bit.
+ When set, all PP-generated LW Mode read
+ transactions which encounter a parity error (across
+ the 36b of data) are reported. */
+ uint64_t cp2perr : 1; /**< PP-CP2 Parity Error Detected - Status bit
+ When set, a parity error had been detected for a
+ PP-generated LW Mode read transaction.
+ If the CP2PINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t cp2parena : 1; /**< CP2 LW Mode Parity Error Enable
+ When set, all PP-generated LW Mode read
+ transactions which encounter a parity error (across
+ the 36b of data) are reported.
+ NOTE: This signal must only be written to a different
+ value when there are no PP-CP2 transactions
+                                                         (preferably during power-on software initialization). */
+ uint64_t dtepina : 1; /**< DTE Parity Error Interrupt Enable bit
+ (for 18b SIMPLE mode ONLY).
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported. */
+ uint64_t dteperr : 1; /**< DTE Parity Error Detected (for 18b SIMPLE mode ONLY)
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported. */
+ uint64_t dteparena : 1; /**< DTE Parity Error Enable (for 18b SIMPLE mode ONLY)
+ When set, all DTE-generated 18b SIMPLE Mode read
+ transactions which encounter a parity error (across
+ the 17b of data) are reported.
+ NOTE: This signal must only be written to a different
+ value when there are no DFA thread engines active
+                                                         (preferably during power-on). */
+      uint64_t dtesyn                   : 7;        /**< DTE 29b ECC Failing 7bit Syndrome
+ When DTESBE or DTEDBE are set, this field contains
+ the failing 7b ECC syndrome. */
+ uint64_t dtedbina : 1; /**< DTE 29b Double Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any DTE-generated
+ 36b SIMPLE Mode read which encounters a double bit
+ error. */
+ uint64_t dtesbina : 1; /**< DTE 29b Single Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any DTE-generated
+ 36b SIMPLE Mode read which encounters a single bit
+ error (which is also corrected). */
+ uint64_t dtedbe : 1; /**< DTE 29b Double Bit Error Detected - Status bit
+ When set, a double bit error had been detected
+ for a DTE-generated 36b SIMPLE Mode read transaction.
+ The DTESYN contains the failing syndrome.
+ If the DTEDBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure.
+ NOTE: DTE-generated 18b SIMPLE Mode Read transactions
+                                                         do not participate in ECC check/correct. */
+ uint64_t dtesbe : 1; /**< DTE 29b Single Bit Error Corrected - Status bit
+ When set, a single bit error had been detected and
+ corrected for a DTE-generated 36b SIMPLE Mode read
+ transaction.
+ If the DTEDBE=0, then the DTESYN contains the
+ failing syndrome (used during correction).
+ NOTE: DTE-generated 18b SIMPLE Mode Read
+                                                         transactions do not participate in ECC check/correct.
+ If the DTESBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t dteeccena : 1; /**< DTE 29b ECC Enable (for 36b SIMPLE mode ONLY)
+ When set, 29b ECC is enabled on all DTE-generated
+ 36b SIMPLE Mode read transactions.
+ NOTE: This signal must only be written to a different
+ value when there are no DFA thread engines active
+                                                         (preferably during power-on software initialization). */
+ uint64_t cp2syn : 8; /**< PP-CP2 QW ECC Failing 8bit Syndrome
+ When CP2SBE or CP2DBE are set, this field contains
+ the failing ECC 8b syndrome.
+ Refer to CP2ECCENA. */
+ uint64_t cp2dbina : 1; /**< PP-CP2 Double Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any PP-generated
+ QW Mode read which encounters a double bit error.
+ Refer to CP2DBE. */
+ uint64_t cp2sbina : 1; /**< PP-CP2 Single Bit Error Interrupt Enable bit
+ When set, an interrupt is posted for any PP-generated
+ QW Mode read which encounters a single bit error
+ (which is also corrected).
+ Refer to CP2SBE. */
+ uint64_t cp2dbe : 1; /**< PP-CP2 Double Bit Error Detected - Status bit
+ When set, a double bit error had been detected
+ for a PP-generated QW Mode read transaction.
+ The CP2SYN contains the failing syndrome.
+ NOTE: PP-generated LW Mode Read transactions
+                                                         do not participate in ECC check/correct.
+ Refer to CP2ECCENA.
+ If the CP2DBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure. */
+ uint64_t cp2sbe : 1; /**< PP-CP2 Single Bit Error Corrected - Status bit
+ When set, a single bit error had been detected and
+ corrected for a PP-generated QW Mode read
+ transaction.
+ If the CP2DBE=0, then the CP2SYN contains the
+ failing syndrome (used during correction).
+ Refer to CP2ECCENA.
+ If the CP2SBINA had previously been enabled(set),
+ an interrupt will be posted. Software can clear
+ the interrupt by writing a 1 to this register bit.
+ See also: DFA_MEMFADR CSR which contains more data
+ about the memory address/control to help isolate
+ the failure.
+ NOTE: PP-generated LW Mode Read transactions
+                                                         do not participate in ECC check/correct.
+ uint64_t cp2eccena : 1; /**< PP-CP2 QW ECC Enable (for QW Mode transactions)
+ When set, 8bit QW ECC is enabled on all PP-generated
+ QW Mode read transactions, CP2SBE and
+ CP2DBE may be set, and CP2SYN may be filled.
+ NOTE: This signal must only be written to a different
+ value when there are no PP-CP2 transactions
+                                                         (preferably during power-on software initialization).
+                                                         NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store. */
+#else
+ uint64_t cp2eccena : 1;
+ uint64_t cp2sbe : 1;
+ uint64_t cp2dbe : 1;
+ uint64_t cp2sbina : 1;
+ uint64_t cp2dbina : 1;
+ uint64_t cp2syn : 8;
+ uint64_t dteeccena : 1;
+ uint64_t dtesbe : 1;
+ uint64_t dtedbe : 1;
+ uint64_t dtesbina : 1;
+ uint64_t dtedbina : 1;
+ uint64_t dtesyn : 7;
+ uint64_t dteparena : 1;
+ uint64_t dteperr : 1;
+ uint64_t dtepina : 1;
+ uint64_t cp2parena : 1;
+ uint64_t cp2perr : 1;
+ uint64_t cp2pina : 1;
+ uint64_t dblovf : 1;
+ uint64_t dblina : 1;
+ uint64_t reserved_33_63 : 31;
+#endif
+ } s;
+ struct cvmx_dfa_err_s cn31xx;
+ struct cvmx_dfa_err_s cn38xx;
+ struct cvmx_dfa_err_s cn38xxp2;
+ struct cvmx_dfa_err_s cn58xx;
+ struct cvmx_dfa_err_s cn58xxp1;
+} cvmx_dfa_err_t;
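+
+/*
+ * Editor's sketch: servicing DFA_ERR, whose status bits are
+ * write-1-to-clear per the notes above. Reads the register, flags the
+ * catastrophic doorbell overflow and the uncorrectable ECC errors, then
+ * writes the image back so any latched status bits clear.
+ * cvmx_read_csr()/cvmx_write_csr() and the CVMX_DFA_ERR address constant
+ * are assumed from the SDK.
+ */
+static inline int cvmx_dfa_err_service_example(void)
+{
+    cvmx_dfa_err_t err;
+    int fatal = 0;
+
+    err.u64 = cvmx_read_csr(CVMX_DFA_ERR);
+    if (err.s.dblovf)
+        fatal = 1;   /* doorbell overflow: DFA may be unrecoverable */
+    if (err.s.cp2dbe || err.s.dtedbe)
+        fatal = 1;   /* double-bit ECC error; see DFA_MEMFADR for address */
+    cvmx_write_csr(CVMX_DFA_ERR, err.u64);  /* W1C the status bits */
+    return fatal;
+}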
+
+
+/**
+ * cvmx_dfa_memcfg0
+ *
+ * DFA_MEMCFG0 = DFA Memory Configuration
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memcfg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t rldqck90_rst : 1; /**< RLDCK90 and RLDQK90 DLL SW Reset
+ When written with a '1' the RLDCK90 and RLDQK90 DLL are
+ in soft-reset. */
+ uint64_t rldck_rst : 1; /**< RLDCK Zero Delay DLL(Clock Generator) SW Reset
+ When written with a '1' the RLDCK zero delay DLL is in
+ soft-reset. */
+ uint64_t clkdiv : 2; /**< RLDCLK Divisor Select
+ - 0: RLDx_CK_H/L = Core Clock /2
+ - 1: RESERVED (must not be used)
+ - 2: RLDx_CK_H/L = Core Clock /3
+ - 3: RLDx_CK_H/L = Core Clock /4
+ The DFA LLM interface(s) are tied to the core clock
+ frequency through this programmable clock divisor.
+ Examples:
+ Core Clock(MHz) | DFA-LLM Clock(MHz) | CLKDIV
+ -----------------+--------------------+--------
+ 800 | 400/(800-DDR) | /2
+ 1000 | 333/(666-DDR) | /3
+ 800 | 200/(400-DDR) | /4
+ NOTE: This value MUST BE programmed BEFORE doing a
+ Hardware init sequence (see: DFA_MEMCFG0[INIT_Px] bits).
+ *** NOTE: O9N PASS1 Addition */
+ uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable
+ When enabled, PP-core LLM accesses to the lower-512MB
+ LLM address space are sent to the single DFA port
+ which is enabled. NOTE: If LPP_ENA=1, only
+ one DFA RLDRAM port may be enabled for RLDRAM accesses
+ (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set).
+ PP-core LLM accesses to the upper-512MB LLM address
+ space are sent to the other 'disabled' DFA port.
+ SW RESTRICTION: If LPP_ENA=1, then only one DFA port
+ may be enabled for RLDRAM accesses (ie: ENA_P0 and
+ ENA_P1 CAN NEVER BOTH be set).
+ NOTE: This bit is used to allow PP-Core LLM accesses to a
+ disabled port, such that each port can be sequentially
+ addressed (ie: disable LW address interleaving).
+ Enabling this bit allows BOTH PORTs to be active and
+ sequentially addressable. The single port that is
+ enabled(ENA_Px) will respond to the low-512MB LLM address
+ space, and the other 'disabled' port will respond to the
+ high-512MB LLM address space.
+ Example usage:
+ - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses)
+ - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses).
+ USAGE NOTE:
+ If LPP_ENA=1 and SW DOES NOT initialize the disabled port
+ (ie: INIT_Px=0->1), then refreshes and the HW init
+ sequence WILL NOT occur for the disabled port.
+ If LPP_ENA=1 and SW does initialize the disabled port
+ (INIT_Px=0->1 with ENA_Px=0), then refreshes and
+ the HW init sequence WILL occur to the disabled port. */
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ the BNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk(or clam). In a clamshell configuration,
+ The N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+                                                         (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+                                                         the BUNK_INIT[1:0] field = 2'b11 (so that CS_N[1:0] is
+                                                         driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended
+ RLDRAM operation.
+ [legal values 0: DIV2 2: DIV3 3: DIV4]
+ 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST]
+ and DFA_MEM_CFG0[RLDQCK90_RST] field at
+ the SAME TIME. This step puts all three DLLs in
+ SW reset (RLDCK, RLDCK90, RLDQK90 DLLs).
+ 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field.
+ This step takes the RLDCK DLL out of soft-reset so
+ that the DLL can generate the RLDx_CK_H/L clock pins.
+ 4) Wait 1ms (for RLDCK DLL to achieve lock)
+ 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field.
+ This step takes the RLDCK90 DLL AND RLDQK90 DLL out
+ of soft-reset.
+ 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock)
+ 7) Enable memory port(s): ENA_P0=1/ENA_P1=1
+ 8) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ - - - - - Hardware Initialization Sequence - - - - -
+ 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s)
+ intended to be initialized.
+ 10) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+                                                         sequence to that 'specific' port.
+ 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles.
+ [to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers]
+ - - - - - Hardware Initialization Sequence - - - - -
+ 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable
+ refreshes to BOTH bunks.
+ NOTE: In some cases (where the address wires are routed
+ differently between the front and back 'bunks'),
+ SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to
+ control the Hardware initialization sequence for a
+ 'specific bunk'. In these cases, SW would setup the
+ BUNK_INIT and repeat Steps \#9-11 for each bunk/port.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended
+ RLDRAM operation.
+ [legal values 0: DIV2 2: DIV3 3: DIV4]
+ 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST]
+ and DFA_MEM_CFG0[RLDQCK90_RST] field at
+ the SAME TIME. This step puts all three DLLs in
+ SW reset (RLDCK, RLDCK90, RLDQK90 DLLs).
+ 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field.
+ This step takes the RLDCK DLL out of soft-reset so
+ that the DLL can generate the RLDx_CK_H/L clock pins.
+ 4) Wait 1ms (for RLDCK DLL to achieve lock)
+ 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field.
+ This step takes the RLDCK90 DLL AND RLDQK90 DLL out
+ of soft-reset.
+ 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock)
+ 7) Enable memory port(s) ENA_P0=1/ENA_P1=1
+ 8) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ - - - - - Hardware Initialization Sequence - - - - -
+ 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s)
+ intended to be initialized.
+ 10) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+                                                         sequence to that 'specific' port.
+ 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles.
+ [to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers]
+ - - - - - Hardware Initialization Sequence - - - - -
+ 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable
+ refreshes to BOTH bunks.
+ NOTE: In some cases (where the address wires are routed
+ differently between the front and back 'bunks'),
+ SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to
+ control the Hardware initialization sequence for a
+ 'specific bunk'. In these cases, SW would setup the
+ BUNK_INIT and repeat Steps \#9-11 for each bunk/port.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+                                                         during power-on SW initialization. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ NOTE: RLDRAM-II MUST USE BLEN=0(2-burst) */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< FCRAM-II Memory Type
+ *** O9N UNSUPPORTED *** */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: a customer is at
+ liberty to enable either Port#0 or Port#1 or both.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: a customer is at
+ liberty to enable either Port#0 or Port#1 or both.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t lpp_ena : 1;
+ uint64_t clkdiv : 2;
+ uint64_t rldck_rst : 1;
+ uint64_t rldqck90_rst : 1;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg0_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_28_63 : 36;
+ uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable
+ When enabled, PP-core LLM accesses to the lower-512MB
+ LLM address space are sent to the single DFA port
+ which is enabled. NOTE: If LPP_ENA=1, only
+ one DFA RLDRAM port may be enabled for RLDRAM accesses
+ (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set).
+ PP-core LLM accesses to the upper-512MB LLM address
+ space are sent to the other 'disabled' DFA port.
+ SW RESTRICTION: If LPP_ENA=1, then only one DFA port
+ may be enabled for RLDRAM accesses (ie: ENA_P0 and
+ ENA_P1 CAN NEVER BOTH be set).
+ NOTE: This bit is used to allow PP-Core LLM accesses to a
+ disabled port, such that each port can be sequentially
+ addressed (ie: disable LW address interleaving).
+ Enabling this bit allows BOTH PORTs to be active and
+ sequentially addressable. The single port that is
+ enabled(ENA_Px) will respond to the low-512MB LLM address
+ space, and the other 'disabled' port will respond to the
+ high-512MB LLM address space.
+ Example usage:
+ - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses)
+ - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses).
+ USAGE NOTE:
+ If LPP_ENA=1 and SW DOES NOT initialize the disabled port
+ (ie: INIT_Px=0->1), then refreshes and the HW init
+ sequence WILL NOT occur for the disabled port.
+ If LPP_ENA=1 and SW does initialize the disabled port
+ (INIT_Px=0->1 with ENA_Px=0), then refreshes and
+ the HW init sequence WILL occur to the disabled port. */
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ BUNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk(or clam). In a clamshell configuration,
+ the N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+ (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is
+ driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE
+ initialized independently. In other words, a HW init
+ must be done for Bunk#0, and then another HW init
+ must be done for Bunk#1 at power-on. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE
+ ZERO(for optimal performance). However, if electrically,
+ DQ-sharing becomes a power/heat issue, then R2R_PBUNK
+ should be set (but at a cost to performance (1/2 BW)).
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a
+ "Redundant Bunk" scheme is employed to provide the
+ highest overall performance (1 Req/ MCLK cycle).
+ In this mode, it's imperative that SW set the PBUNK
+ field +1 'above' the highest address bit. (such that
+ the PBUNK extracted from the address will always be
+ zero). In this mode, the CS_N[1:0] pins are driven
+ to each redundant bunk based on a TDM scheme:
+ [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ When BLEN=0(BL2), all QW reads/writes from CP2 are
+ decomposed into 2 separate BL2(LW) requests to the
+ Low-Latency memory.
+ When BLEN=1(BL4), a LW request (from CP2 or NCB) is
+ treated as 1 BL4(QW) request to the low latency memory.
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization before the DFA LLM
+ (low latency memory) is used.
+ NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst)
+ NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a
+ multi-bunk(clam) board design.
+ NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4),
+ SW SHOULD use CP2 QW read/write requests (for
+ optimal low-latency bus performance).
+ [LW length read/write requests(in BL4 mode) use 50%
+ of the available bus bandwidth]
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only
+ be used with FCRAM-II devices which support BL2 mode
+ (see: Toshiba FCRAM-II, where DQ tristate after 2 data
+ transfers).
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW
+ write requests (FCRAM-II+ device specification has removed
+ the variable write mask function from the devices).
+ As such, if this mode is used, SW must be careful to
+ issue only PP-CP2 QW write requests. */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1)
+ For FCRAM-II (BL2 grepl>=2x): (TBL=3)
+ NOTE: When MTYPE=1(FCRAM-II) BLEN=0(BL2 Mode),
+ grepl>=2x, writes require redundant bunk writes
+ which require an additional 2 cycles before slotting
+ the next read.
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II/FCRAM-II (BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM)
+ NOTE: N3K-P1 only supports RLDRAM-II
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the
+ "unidirectional DS/QS" mode is supported. (see FCRAM
+ data sheet EMRS[A6:A5]=SS(Strobe Select) register
+ definition. [in FCRAM 2-burst mode, we use FCRAM
+ in a clamshell configuration such that clam0 is
+ addressed independently of clam1, and DQ is shared
+ for optimal performance. As such it's imperative that
+ the QS are conditionally received (and are NOT
+ free-running), as the N3K receive data capture silos
+ OR the clam0/1 QS strobes.
+ NOTE: If this bit is SET, the ASX0/1
+ ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET
+ in order for the RLD0/1-PHY(s) to support FCRAM devices. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: For N3K-P1, to enable Port#0(2nd port),
+ Port#1 MUST ALSO be enabled.
+ NOTE: For N3K-P2, single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: For N3K-P1, If the customer wishes to use a
+ single port, s/he must enable Port#1 (and not Port#0).
+ NOTE: For N3K-P2, single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t lpp_ena : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_memcfg0_cn38xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_27_63 : 37;
+ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization
+ sequence (triggered by DFA_MEMCFG0[INIT_Px]) or
+ b) during a normal refresh sequence. If
+ BUNK_INIT[x]=1, the corresponding CS_N[x] is driven.
+ NOTE: This is required for DRAM used in a
+ clamshell configuration, since the address lines
+ carry Mode Register write data that is unique
+ per bunk(or clam). In a clamshell configuration,
+ the N3K A[x] pin may be tied into Clam#0's A[x]
+ and also into Clam#1's 'mirrored' address bit A[y]
+ (eg: Clam0 sees A[5] and Clam1 sees A[15]).
+ To support clamshell designs, SW must initiate
+ two separate HW init sequences for the two bunks
+ (or clams). Before each HW init sequence is triggered,
+ SW must preload the DFA_MEMRLD[22:0] with the data
+ that will be driven onto the A[22:0] wires during
+ an MRS mode register write.
+ NOTE: After the final HW initialization sequence has
+ been triggered, SW must wait 64K eclks before writing
+ the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is
+ driven during refresh sequences in normal operation).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE
+ initialized independently. In other words, a HW init
+ must be done for Bunk#0, and then another HW init
+ must be done for Bunk#1 at power-on. */
+ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#0 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'),
+ the HW init sequence(s) for Memory Port \#1 is
+ initiated.
+ NOTE: To initialize memory, SW must:
+ 1) Enable memory port(s):
+ a) ENA_P1=1 (single port in pass 1) OR
+ b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1)
+ 2) Wait 100us (to ensure a stable clock
+ to the RLDRAMs) - as per RLDRAM spec.
+ 3) Write a '1' to the corresponding INIT_Px which
+ will initiate a hardware initialization
+ sequence.
+ NOTE: After writing a '1', SW must wait 64K eclk
+ cycles to ensure the HW init sequence has completed
+ before writing to ANY of the DFA_MEM* registers.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted
+ if back to back reads are issued to different physical
+ bunks. This is to avoid DQ data bus collisions when
+ references cross between physical bunks.
+ [NOTE: the physical bunk address boundary is determined
+ by the PBUNK bit].
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE
+ ZERO(for optimal performance). However, if electrically,
+ DQ-sharing becomes a power/heat issue, then R2R_PBUNK
+ should be set (but at a cost to performance (1/2 BW)).
+ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer.
+ Specifies which address bit within the Longword
+ Memory address MA[23:0] is used to determine the
+ chip selects.
+ [RLD_CS0_N corresponds to physical bunk \#0, and
+ RLD_CS1_N corresponds to physical bunk \#1].
+ - 000: CS0_N = MA[19]/CS1_N = !MA[19]
+ - 001: CS0_N = MA[20]/CS1_N = !MA[20]
+ - 010: CS0_N = MA[21]/CS1_N = !MA[21]
+ - 011: CS0_N = MA[22]/CS1_N = !MA[22]
+ - 100: CS0_N = MA[23]/CS1_N = !MA[23]
+ - 101-111: CS0_N = 0 /CS1_N = 1
+ Example(s):
+ To build out a 128MB DFA memory, 4x 32Mx9
+ parts could be used to fill out TWO physical
+ bunks (clamshell configuration). Each (of the
+ two) physical bunks contains 2x 32Mx9 = 16Mx36.
+ Each RLDRAM device also contains 8 internal banks,
+ therefore the memory Address is 16M/8banks = 2M
+ addresses/bunk (2^21). In this case, MA[21] would
+ select the physical bunk.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a
+ "Redundant Bunk" scheme is employed to provide the
+ highest overall performance (1 Req/ MCLK cycle).
+ In this mode, it's imperative that SW set the PBUNK
+ field +1 'above' the highest address bit. (such that
+ the PBUNK extracted from the address will always be
+ zero). In this mode, the CS_N[1:0] pins are driven
+ to each redundant bunk based on a TDM scheme:
+ [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */
+ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst)
+ When BLEN=0(BL2), all QW reads/writes from CP2 are
+ decomposed into 2 separate BL2(LW) requests to the
+ Low-Latency memory.
+ When BLEN=1(BL4), a LW request (from CP2 or NCB) is
+ treated as 1 BL4(QW) request to the low latency memory.
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization before the DFA LLM
+ (low latency memory) is used.
+ NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst)
+ NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a
+ multi-bunk(clam) board design.
+ NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4),
+ SW SHOULD use CP2 QW read/write requests (for
+ optimal low-latency bus performance).
+ [LW length read/write requests(in BL4 mode) use 50%
+ of the available bus bandwidth]
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only
+ be used with FCRAM-II devices which support BL2 mode
+ (see: Toshiba FCRAM-II, where DQ tristate after 2 data
+ transfers).
+ NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW
+ write requests (FCRAM-II+ device specification has removed
+ the variable write mask function from the devices).
+ As such, if this mode is used, SW must be careful to
+ issue only PP-CP2 QW write requests. */
+ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable back porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks)
+ On reads, allows user to control the shape of the
+ tristate disable front porch for the DQ data bus.
+ This parameter is also very dependent on the
+ RW_DLY and WR_DLY parameters and care must be
+ taken when programming these parameters to avoid
+ data bus contention. Valid range [0..2]
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from write to read. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II(BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1)
+ For FCRAM-II (BL2 grepl>=2x): (TBL=3)
+ NOTE: When MTYPE=1(FCRAM-II) BLEN=0(BL2 Mode),
+ grepl>=2x, writes require redundant bunk writes
+ which require an additional 2 cycles before slotting
+ the next read.
+ WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the WR_DLY 'may' be tuned down(-1) if bus fight
+ on W->R transitions is not pronounced. */
+ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks):
+ Determines \#mclk cycles to insert when controller
+ switches from read to write. This allows programmer
+ to control the data bus contention.
+ For RLDRAM-II/FCRAM-II (BL2): (TBL=1)
+ For FCRAM-II (BL4): (TBL=2)
+ RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: For aggressive(performance optimal) designs,
+ the RW_DLY 'may' be tuned down(-1) if bus fight
+ on R->W transitions is not pronounced. */
+ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many
+ additional dclks to wait (on top of tRL+1) before
+ pulling data out of the padring silos used for time
+ domain boundary crossing.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM)
+ NOTE: N3K-P1 only supports RLDRAM-II
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the
+ "unidirectional DS/QS" mode is supported. (see FCRAM
+ data sheet EMRS[A6:A5]=SS(Strobe Select) register
+ definition. [in FCRAM 2-burst mode, we use FCRAM
+ in a clamshell configuration such that clam0 is
+ addressed independently of clam1, and DQ is shared
+ for optimal performance. As such it's imperative that
+ the QS are conditionally received (and are NOT
+ free-running), as the N3K receive data capture silos
+ OR the clam0/1 QS strobes.
+ NOTE: If this bit is SET, the ASX0/1
+ ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET
+ in order for the RLD0/1-PHY(s) to support FCRAM devices. */
+ uint64_t reserved_2_2 : 1;
+ uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#0.
+ NOTE: For N3K-P1, to enable Port#0(2nd port),
+ Port#1 MUST ALSO be enabled.
+ NOTE: For N3K-P2, single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#0 corresponds to the Octeon
+ RLD0_* pins. */
+ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1
+ When enabled, this bit lets N3K be the default
+ driver for memory port \#1.
+ NOTE: For N3K-P1, If the customer wishes to use a
+ single port, s/he must enable Port#1 (and not Port#0).
+ NOTE: For N3K-P2, single port mode, a customer is at
+ liberty to enable either Port#0 or Port#1.
+ NOTE: Once a port has been disabled, it MUST NEVER
+ be re-enabled. [the only way to enable a port is
+ through a chip reset].
+ NOTE: DFA Memory Port#1 corresponds to the Octeon
+ RLD1_* pins. */
+#else
+ uint64_t ena_p1 : 1;
+ uint64_t ena_p0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t mtype : 1;
+ uint64_t sil_lat : 2;
+ uint64_t rw_dly : 4;
+ uint64_t wr_dly : 4;
+ uint64_t fprch : 2;
+ uint64_t bprch : 2;
+ uint64_t blen : 1;
+ uint64_t pbunk : 3;
+ uint64_t r2r_pbunk : 1;
+ uint64_t init_p1 : 1;
+ uint64_t init_p0 : 1;
+ uint64_t bunk_init : 2;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn38xxp2;
+ struct cvmx_dfa_memcfg0_s cn58xx;
+ struct cvmx_dfa_memcfg0_s cn58xxp1;
+} cvmx_dfa_memcfg0_t;
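+
+/*
+ * A minimal sketch of the power-on sequence described in the INIT_P0/INIT_P1
+ * notes above: enable the port(s), wait 100us, trigger INIT_Px, then wait
+ * 64K eclks. It assumes the CVMX_DFA_MEMCFG0 address macro and the
+ * cvmx_read_csr()/cvmx_write_csr()/cvmx_wait() helpers from the companion
+ * cvmx headers; 'eclks_per_100us' must be supplied by the caller from the
+ * actual core clock rate.
+ *
+ *   static void dfa_rldram_init(uint64_t eclks_per_100us)
+ *   {
+ *       cvmx_dfa_memcfg0_t cfg;
+ *
+ *       cfg.u64 = cvmx_read_csr(CVMX_DFA_MEMCFG0);
+ *       cfg.s.ena_p0 = 1;               // step 1: enable the memory port(s)
+ *       cfg.s.ena_p1 = 1;
+ *       cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *
+ *       cvmx_wait(eclks_per_100us);     // step 2: >=100us for a stable clock
+ *
+ *       cfg.s.init_p0 = 1;              // step 3: 0->1 triggers the HW init
+ *       cfg.s.init_p1 = 1;
+ *       cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
+ *
+ *       cvmx_wait(65536);               // 64K eclks before touching any
+ *                                       // other DFA_MEM* register
+ *   }
+ */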
+
+
+/**
+ * cvmx_dfa_memcfg1
+ *
+ * DFA_MEMCFG1 = RLDRAM Memory Timing Configuration
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memcfg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_34_63 : 30;
+ uint64_t ref_intlo : 9; /**< Burst Refresh Interval[8:0] (\#dclks)
+ Provides an additional level of granularity for the
+ refresh interval: it specifies the additional
+ \#dclks [0...511] to be added to the REF_INT[3:0] field.
+ For RLDRAM-II: For dclk(400MHz=2.5ns):
+ Example: 64K AREF cycles required within tREF=32ms
+ trefint = tREF(ms)/(64K cycles/8banks)
+ = 32ms/8K = 3.9us = 3900ns
+ REF_INT[3:0] = ROUND_DOWN[(trefint/dclk)/512]
+ = ROUND_DOWN[(3900/2.5)/512]
+ = 3
+ REF_INTLO[8:0] = MOD[(trefint/dclk), 512]
+ = MOD[(3900/2.5), 512]
+ = 24
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ *** NOTE: PASS2 Addition */
+ uint64_t aref_ena : 1; /**< Auto Refresh Cycle Enable
+ INTERNAL USE ONLY:
+ NOTE: This mode bit is ONLY intended to be used by
+ low-level power-on initialization routines in the
+ event that the hardware initialization routine
+ does not work. It allows SW to create AREF
+ commands on the RLDRAM bus directly.
+ When this bit is set, ALL RLDRAM writes (issued by
+ a PP through the NCB or CP2) are converted to AREF
+ commands on the RLDRAM bus. The write-address is
+ presented on the A[20:0]/BA[2:0] pins (for which
+ the RLDRAM only interprets BA[2:0]).
+ When this bit is set, only writes are allowed
+ and MUST use grepl=0 (1x).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: MRS_ENA and AREF_ENA are mutually exclusive
+ (SW can set one or the other, but never both!)
+ NOTE: AREF commands generated using this method target
+ the 'addressed' bunk. */
+ uint64_t mrs_ena : 1; /**< Mode Register Set Cycle Enable
+ INTERNAL USE ONLY:
+ NOTE: This mode bit is ONLY intended to be used by
+ low-level power-on initialization routines in the
+ event that the hardware initialization routine
+ does not work. It allows SW to create MRS
+ commands on the RLDRAM bus directly.
+ When this bit is set, ALL RLDRAM writes (issued by
+ a PP through the NCB or CP2) are converted to MRS
+ commands on the RLDRAM bus. The write-address is
+ presented on the A[20:0]/BA[2:0] pins (for which
+ the RLDRAM only interprets A[17:0]).
+ When this bit is set, only writes are allowed
+ and MUST use grepl=0 (1x).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization.
+ NOTE: MRS_ENA and AREF_ENA are mutually exclusive
+ (SW can set one or the other, but never both!)
+ NOTE: MRS commands generated using this method target
+ the 'addressed' bunk. */
+ uint64_t tmrsc : 3; /**< Mode Register Set Cycle Time (represented in \#mclks)
+ - 000-001: RESERVED
+ - 010: tMRSC = 2 mclks
+ - 011: tMRSC = 3 mclks
+ - ...
+ - 111: tMRSC = 7 mclks
+ NOTE: The device tMRSC parameter is a function of CL
+ (which during HW initialization is not known). It's
+ recommended to load the tMRSC(MAX) value to avoid timing
+ violations.
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t trc : 4; /**< Row Cycle Time (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0010: RESERVED
+ - 0011: tRC = 3 mclks
+ - 0100: tRC = 4 mclks
+ - 0101: tRC = 5 mclks
+ - 0110: tRC = 6 mclks
+ - 0111: tRC = 7 mclks
+ - 1000: tRC = 8 mclks
+ - 1001: tRC = 9 mclks
+ - 1010-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t twl : 4; /**< Write Latency (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0001: RESERVED
+ - 0010: Write Latency (WL=2.0 mclk)
+ - 0011: Write Latency (WL=3.0 mclks)
+ - 0100: Write Latency (WL=4.0 mclks)
+ - 0101: Write Latency (WL=5.0 mclks)
+ - 0110: Write Latency (WL=6.0 mclks)
+ - 0111: Write Latency (WL=7.0 mclks)
+ - 1000: Write Latency (WL=8.0 mclks)
+ - 1001: Write Latency (WL=9.0 mclks)
+ - 1010: Write Latency (WL=10.0 mclks)
+ - 1011-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t trl : 4; /**< Read Latency (represented in \#mclks)
+ see also: DFA_MEMRLD[RLCFG] field which must
+ correspond with tRL/tWL parameter(s).
+ - 0000-0010: RESERVED
+ - 0011: Read Latency = 3 mclks
+ - 0100: Read Latency = 4 mclks
+ - 0101: Read Latency = 5 mclks
+ - 0110: Read Latency = 6 mclks
+ - 0111: Read Latency = 7 mclks
+ - 1000: Read Latency = 8 mclks
+ - 1001: Read Latency = 9 mclks
+ - 1010: Read Latency = 10 mclks
+ - 1011-1111: RESERVED
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tskw : 2; /**< Board Skew (represented in \#dclks)
+ Represents additional board skew of DQ/DQS.
+ - 00: board-skew = 0 dclk
+ - 01: board-skew = 1 dclk
+ - 10: board-skew = 2 dclk
+ - 11: board-skew = 3 dclk
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t ref_int : 4; /**< Refresh Interval (represented in \#of 512 dclk
+ increments).
+ - 0000: RESERVED
+ - 0001: 1 * 512 = 512 dclks
+ - ...
+ - 1111: 15 * 512 = 7680 dclks
+ NOTE: For finer level of granularity, refer to
+ REF_INTLO[8:0] field.
+ For RLDRAM-II, each refresh interval will
+ generate a burst of 8 AREF commands, one to each of
+ 8 explicit banks (referenced using the RLD_BA[2:0]
+ pins).
+ Example: For mclk=200MHz/dclk(400MHz=2.5ns):
+ 64K AREF cycles required within tREF=32ms
+ trefint = tREF(ms)/(64K cycles/8banks)
+ = 32ms/8K = 3.9us = 3900ns
+ REF_INT = ROUND_DOWN[(trefint/dclk)/512]
+ = ROUND_DOWN[(3900/2.5)/512]
+ = 3
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t ref_int : 4;
+ uint64_t tskw : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t trl : 4;
+ uint64_t twl : 4;
+ uint64_t trc : 4;
+ uint64_t tmrsc : 3;
+ uint64_t mrs_ena : 1;
+ uint64_t aref_ena : 1;
+ uint64_t ref_intlo : 9;
+ uint64_t reserved_34_63 : 30;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg1_s cn38xx;
+ struct cvmx_dfa_memcfg1_s cn38xxp2;
+ struct cvmx_dfa_memcfg1_s cn58xx;
+ struct cvmx_dfa_memcfg1_s cn58xxp1;
+} cvmx_dfa_memcfg1_t;
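+
+/*
+ * The REF_INT/REF_INTLO pair above is just a divide/modulo by 512 of the
+ * refresh interval expressed in dclks. A minimal helper matching the worked
+ * example in the field descriptions (function and parameter names are
+ * hypothetical):
+ *
+ *   static void dfa_refresh_fields(uint64_t trefint_ps, uint64_t dclk_ps,
+ *                                  uint64_t *ref_int, uint64_t *ref_intlo)
+ *   {
+ *       uint64_t dclks = trefint_ps / dclk_ps; // 3900000ps/2500ps = 1560
+ *       *ref_int   = dclks / 512;              // 1560/512 = 3
+ *       *ref_intlo = dclks % 512;              // 1560%512 = 24
+ *   }
+ */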
+
+
+/**
+ * cvmx_dfa_memcfg2
+ *
+ * DFA_MEMCFG2 = DFA Memory Config Register \#2
+ * *** NOTE: Pass2 Addition
+ *
+ * Description: Additional Memory Configuration CSRs to support FCRAM-II/II+ and Network DRAM-II
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memcfg2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable
+ When SET, the DFA clocks for DTE(thread engine)
+ operation are disabled.
+ NOTE: When SET, SW MUST NEVER issue ANY operations to
+ the DFA via the NCB Bus. All DFA Operations must be
+ issued solely through the CP2 interface.
+ *** NOTE: PASS2 Addition
+ NOTE: When DTECLKDIS=1, if CP2 Errors are encountered
+ (ie: CP2SBE, CP2DBE, CP2PERR), the DFA_MEMFADR CSR
+ does not reflect the failing address/ctl information. */
+ uint64_t silrst : 1; /**< LLM-PHY Silo Reset
+ When a '1' is written (and the previous
+ value was '0'), the LLM-PHY Silo read/write
+ pointers are reset.
+ NOTE: SW MUST WAIT 400 dclks after the LAST HW Init
+ sequence was launched (ie: INIT_START 0->1 CSR write),
+ before the SILRST can be triggered (0->1). */
+ uint64_t trfc : 5; /**< FCRAM-II Refresh Interval
+ *** O9N UNSUPPORTED *** */
+ uint64_t refshort : 1; /**< FCRAM Short Refresh Mode
+ *** O9N UNSUPPORTED *** */
+ uint64_t ua_start : 2; /**< FCRAM-II Upper Address Start
+ *** O9N UNSUPPORTED *** */
+ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper
+ when extracting address bits for the memory bank#.
+ - 0: 4 banks/device
+ - 1: 8 banks/device
+ *** NOTE: PASS2 Addition */
+ uint64_t fcram2p : 1; /**< FCRAM-II+ Mode Enable
+ *** O9N UNSUPPORTED *** */
+#else
+ uint64_t fcram2p : 1;
+ uint64_t maxbnk : 1;
+ uint64_t ua_start : 2;
+ uint64_t refshort : 1;
+ uint64_t trfc : 5;
+ uint64_t silrst : 1;
+ uint64_t dteclkdis : 1;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_dfa_memcfg2_s cn38xx;
+ struct cvmx_dfa_memcfg2_s cn38xxp2;
+ struct cvmx_dfa_memcfg2_s cn58xx;
+ struct cvmx_dfa_memcfg2_s cn58xxp1;
+} cvmx_dfa_memcfg2_t;
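+
+/*
+ * A minimal sketch of the SILRST pulse described above, assuming the
+ * CVMX_DFA_MEMCFG2 address macro and the cvmx_read_csr()/cvmx_write_csr()
+ * helpers. Per the note, this must only run at least 400 dclks after the
+ * last HW init sequence was launched:
+ *
+ *   cvmx_dfa_memcfg2_t cfg2;
+ *   cfg2.u64 = cvmx_read_csr(CVMX_DFA_MEMCFG2);
+ *   cfg2.s.silrst = 1;   // 0->1 resets the LLM-PHY silo read/write pointers
+ *   cvmx_write_csr(CVMX_DFA_MEMCFG2, cfg2.u64);
+ */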
+
+
+/**
+ * cvmx_dfa_memfadr
+ *
+ * DFA_MEMFADR = RLDRAM Failing Address/Control Register
+ *
+ * Description: DFA Memory Failing Address/Control Error Capture information
+ * This register contains useful information to help in isolating an RLDRAM memory failure.
+ * NOTE: The first detected SEC/DED/PERR failure is captured in DFA_MEMFADR; however, a DED or PERR (which is
+ * more severe) will always overwrite a SEC error. The user can 'infer' the source of the interrupt
+ * via the FSRC field.
+ * NOTE: If DFA_MEMCFG2[DTECLKDIS]=1, the contents of this register are UNDEFINED.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memfadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_24_63 : 40;
+ uint64_t maddr : 24; /**< Memory Address */
+#else
+ uint64_t maddr : 24;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_dfa_memfadr_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_40_63 : 24;
+ uint64_t fdst : 9; /**< Fill-Destination
+ FSRC[1:0] | FDST[8:0]
+ -------------+-------------------------------------
+ 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)]
+ 1(NCB-CSR) | [ncbSRC[8:0]]
+ 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)]
+ where:
+ DTE: DFA Thread Engine ID#
+ PP: Packet Processor ID#
+ FID: Fill-ID# (unique per PP)
+ WIDX: 16b SIMPLE Mode (index)
+ DMODE: (0=16b SIMPLE/1=32b SIMPLE)
+ SIZE: (0=LW Mode access/1=QW Mode Access)
+ INDEX: (0=Low LW/1=High LW)
+ NOTE: QW refers to a 56/64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 32-bit load/store. */
+ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */
+ uint64_t pnum : 1; /**< Memory Port
+ NOTE: For O2P, this bit will always return zero. */
+ uint64_t bnum : 3; /**< Memory Bank
+ When DFA_DDR2_ADDR[RNK_LO]=1, BNUM[2]=RANK[0].
+ (RANK[1] can be inferred from MADDR[24:0]) */
+ uint64_t maddr : 25; /**< Memory Address */
+#else
+ uint64_t maddr : 25;
+ uint64_t bnum : 3;
+ uint64_t pnum : 1;
+ uint64_t fsrc : 2;
+ uint64_t fdst : 9;
+ uint64_t reserved_40_63 : 24;
+#endif
+ } cn31xx;
+ struct cvmx_dfa_memfadr_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_39_63 : 25;
+ uint64_t fdst : 9; /**< Fill-Destination
+ FSRC[1:0] | FDST[8:0]
+ -------------+-------------------------------------
+ 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)]
+ 1(NCB-CSR) | [ncbSRC[8:0]]
+ 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)]
+ where:
+ DTE: DFA Thread Engine ID#
+ PP: Packet Processor ID#
+ FID: Fill-ID# (unique per PP)
+ WIDX: 18b SIMPLE Mode (index)
+ DMODE: (0=18b SIMPLE/1=36b SIMPLE)
+ SIZE: (0=LW Mode access/1=QW Mode Access)
+ INDEX: (0=Low LW/1=High LW)
+ NOTE: QW refers to a 64-bit LLM Load/Store (initiated
+ by a processor core). LW refers to a 36-bit load/store. */
+ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */
+ uint64_t pnum : 1; /**< Memory Port
+ NOTE: the port id's are reversed
+ PNUM==0 => port#1
+ PNUM==1 => port#0 */
+ uint64_t bnum : 3; /**< Memory Bank */
+ uint64_t maddr : 24; /**< Memory Address */
+#else
+ uint64_t maddr : 24;
+ uint64_t bnum : 3;
+ uint64_t pnum : 1;
+ uint64_t fsrc : 2;
+ uint64_t fdst : 9;
+ uint64_t reserved_39_63 : 25;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_memfadr_cn38xx cn38xxp2;
+ struct cvmx_dfa_memfadr_cn38xx cn58xx;
+ struct cvmx_dfa_memfadr_cn38xx cn58xxp1;
+} cvmx_dfa_memfadr_t;
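+
+/*
+ * On a SEC/DED/PERR interrupt, the failing reference can be decoded from
+ * this register. A cn38xx-flavored sketch, assuming the CVMX_DFA_MEMFADR
+ * address macro and cvmx_read_csr(); note the reversed port ids on cn38xx:
+ *
+ *   cvmx_dfa_memfadr_t fadr;
+ *   fadr.u64 = cvmx_read_csr(CVMX_DFA_MEMFADR);
+ *   unsigned port = !fadr.cn38xx.pnum;    // PNUM==0 => port#1 (reversed)
+ *   unsigned bank = fadr.cn38xx.bnum;     // failing memory bank
+ *   unsigned addr = fadr.cn38xx.maddr;    // failing memory address
+ *   unsigned src  = fadr.cn38xx.fsrc;     // 0=NCB-DTE/1=NCB-CSR/3=PP-CP2
+ */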
+
+
+/**
+ * cvmx_dfa_memfcr
+ *
+ * DFA_MEMFCR = FCRAM MRS Register(s) EMRS2[14:0], EMRS1[14:0], MRS[14:0]
+ * *** O9N UNSUPPORTED ***
+ *
+ * Notes:
+ * For FCRAM-II please consult your device's data sheet for further details:
+ * MRS Definition:
+ * A[13:8]=0 RESERVED
+ * A[7]=0 TEST MODE (N3K requires test mode 0:"disabled")
+ * A[6:4] CAS LATENCY (fully programmable - SW must ensure that the value programmed
+ * into DFA_MEM_CFG0[TRL] corresponds with this value).
+ * A[3]=0 BURST TYPE (N3K requires 0:"Sequential" Burst Type)
+ * A[2:0] BURST LENGTH [1:BL2/2:BL4] (N3K only supports BL=2,4)
+ *
+ * In BL2 mode(for highest performance), only 1/2 the physical
+ * memory is unique (ie: each bunk stores the same information).
+ * In BL4 mode(highest capacity), all of the physical memory
+ * is unique (ie: each bunk is uniquely addressable).
+ * EMRS Definition:
+ * A[13:12] REFRESH MODE (N3K Supports only 0:"Conventional" and 1:"Short" auto-refresh modes)
+ *
+ * (SW must ensure that the value programmed into DFA_MEMCFG2[REFSHORT]
+ * is also reflected in the Refresh Mode encoding).
+ * A[11:7]=0 RESERVED
+ * A[6:5]=2 STROBE SELECT (N3K supports only 2:"Unidirectional DS/QS" mode - the read capture
+ * silos rely on a conditional QS strobe)
+ * A[4:3] DIC(QS) QS Drive Strength: fully programmable (consult your FCRAM-II data sheet)
+ * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive]
+ * A[2:1] DIC(DQ) DQ Drive Strength: fully programmable (consult your FCRAM-II data sheet)
+ * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive]
+ * A[0] DLL DLL Enable: Programmable [0:DLL Enable/1: DLL Disable]
+ *
+ * EMRS2 Definition: (for FCRAM-II+)
+ * A[13:11]=0 RESERVED
+ * A[10:8] ODTDS On Die Termination (DS+/-)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ * A[7:6]=0 MBW Multi-Bank Write: (N3K requires use of 0:"single bank" mode only)
+ * A[5:3] ODTin On Die Termination (input pin)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ * A[2:0] ODTDQ On Die Termination (DQ)
+ * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED]
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memfcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_47_63 : 17;
+ uint64_t emrs2 : 15; /**< Memory Address[14:0] during EMRS2(for FCRAM-II+)
+ *** O9N UNSUPPORTED *** */
+ uint64_t reserved_31_31 : 1;
+ uint64_t emrs : 15; /**< Memory Address[14:0] during EMRS
+ *** O9N UNSUPPORTED ***
+ (A[0]=1: DLL Enabled) */
+ uint64_t reserved_15_15 : 1;
+ uint64_t mrs : 15; /**< FCRAM Memory Address[14:0] during MRS
+ *** O9N UNSUPPORTED ***
+ A[6:4]=4 CAS LATENCY=4(default)
+ A[3]=0 Burst Type(must be 0:Sequential)
+ A[2:0]=2 Burst Length=4(default) */
+#else
+ uint64_t mrs : 15;
+ uint64_t reserved_15_15 : 1;
+ uint64_t emrs : 15;
+ uint64_t reserved_31_31 : 1;
+ uint64_t emrs2 : 15;
+ uint64_t reserved_47_63 : 17;
+#endif
+ } s;
+ struct cvmx_dfa_memfcr_s cn38xx;
+ struct cvmx_dfa_memfcr_s cn38xxp2;
+ struct cvmx_dfa_memfcr_s cn58xx;
+ struct cvmx_dfa_memfcr_s cn58xxp1;
+} cvmx_dfa_memfcr_t;
+
+
+/**
+ * cvmx_dfa_memrld
+ *
+ * DFA_MEMRLD = DFA RLDRAM MRS Register Values
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_memrld_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_23_63 : 41;
+ uint64_t mrsdat : 23; /**< This field represents the data driven onto the
+ A[22:0] address lines during MRS(Mode Register Set)
+ commands (during a HW init sequence). This field
+ corresponds with the Mode Register Bit Map from
+ your RLDRAM-II device specific data sheet.
+ A[17:10]: RESERVED
+ A[9]: ODT (on die termination)
+ A[8]: Impedance Matching
+ A[7]: DLL Reset
+ A[6]: UNUSED
+ A[5]: Address Mux (for N3K: MUST BE ZERO)
+ A[4:3]: Burst Length (for N3K: MUST BE ZERO)
+ A[2:0]: Configuration (see data sheet for
+ specific RLDRAM-II device).
+ - 000-001: CFG=1 [tRC=4/tRL=4/tWL=5]
+ - 010: CFG=2 [tRC=6/tRL=6/tWL=7]
+ - 011: CFG=3 [tRC=8/tRL=8/tWL=9]
+ - 100-111: RESERVED
+ NOTE: For additional density, the RLDRAM-II parts
+ can be 'clamshelled' (ie: two devices mounted on
+ different sides of the PCB board), since the BGA
+ pinout supports 'mirroring'.
+ To support a clamshell design, SW must preload
+ the MRSDAT[22:0] with the proper A[22:0] pin mapping
+ which is dependent on the 'selected' bunk/clam
+ (see also: DFA_MEMCFG0[BUNK_INIT] field).
+ NOTE: Care MUST BE TAKEN NOT to write to this register
+ within 64K eclk cycles of a HW INIT (see: INIT_P0/INIT_P1).
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t mrsdat : 23;
+ uint64_t reserved_23_63 : 41;
+#endif
+ } s;
+ struct cvmx_dfa_memrld_s cn38xx;
+ struct cvmx_dfa_memrld_s cn38xxp2;
+ struct cvmx_dfa_memrld_s cn58xx;
+ struct cvmx_dfa_memrld_s cn58xxp1;
+} cvmx_dfa_memrld_t;
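+
+/*
+ * Clamshell init order implied by the notes above: preload MRSDAT with the
+ * A[22:0] image for one clam, run a HW init targeting that bunk, then repeat
+ * for the other clam. A sketch, where the clam*_a_image values are
+ * board-specific placeholders (assumes the CVMX_DFA_MEMRLD address macro):
+ *
+ *   cvmx_dfa_memrld_t rld;
+ *   rld.u64 = 0;
+ *   rld.s.mrsdat = clam0_a_image;     // board-specific A[22:0] MRS data
+ *   cvmx_write_csr(CVMX_DFA_MEMRLD, rld.u64);
+ *   // ...HW init for bunk#0: DFA_MEMCFG0[BUNK_INIT]=1, INIT_Px 0->1...
+ *   // then reload MRSDAT with clam1_a_image and repeat with BUNK_INIT=2
+ */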
+
+
+/**
+ * cvmx_dfa_ncbctl
+ *
+ * DFA_NCBCTL = DFA NCB CTL Register
+ *
+ * Description:
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_ncbctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t sbdnum : 5; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 32 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 5;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_dfa_ncbctl_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t sbdnum : 4; /**< SBD Debug Entry#
+ For internal use only. (DFA Scoreboard debug)
+ Selects which one of 16 DFA Scoreboard entries is
+ latched into the DFA_SBD_DBG[0-3] registers. */
+ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe
+ For internal use only. (DFA Scoreboard debug)
+ When written with a '1', the DFA Scoreboard Debug
+ registers (DFA_SBD_DBG[0-3]) are all locked down.
+ This allows SW to lock down the contents of the entire
+ SBD for a single instant in time. All subsequent reads
+ of the DFA scoreboard registers will return the data
+ from that instant in time. */
+ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode
+ DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode
+ (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode
+ (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+ uint64_t imode : 1; /**< NCB-Inbound Arbiter
+ (0=FP [LP=NRQ,HP=NRP], 1=RR)
+ NOTE: This should only be written to a different value
+ during power-on SW initialization. */
+#else
+ uint64_t imode : 1;
+ uint64_t qmode : 1;
+ uint64_t pmode : 1;
+ uint64_t dtmode : 1;
+ uint64_t dcmode : 1;
+ uint64_t sbdlck : 1;
+ uint64_t sbdnum : 4;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn38xx;
+ struct cvmx_dfa_ncbctl_cn38xx cn38xxp2;
+ struct cvmx_dfa_ncbctl_s cn58xx;
+ struct cvmx_dfa_ncbctl_s cn58xxp1;
+} cvmx_dfa_ncbctl_t;
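+
+/*
+ * Scoreboard snapshot procedure implied by SBDNUM/SBDLCK above: select the
+ * entry, strobe the lock, then read the (now frozen) debug registers. A
+ * sketch, assuming the CVMX_DFA_NCBCTL and CVMX_DFA_SBD_DBG0 address macros:
+ *
+ *   cvmx_dfa_ncbctl_t ncb;
+ *   ncb.u64 = cvmx_read_csr(CVMX_DFA_NCBCTL);
+ *   ncb.s.sbdnum = dte;               // scoreboard entry of interest
+ *   ncb.s.sbdlck = 1;                 // freeze DFA_SBD_DBG[0-3]
+ *   cvmx_write_csr(CVMX_DFA_NCBCTL, ncb.u64);
+ *   uint64_t sbd0 = cvmx_read_csr(CVMX_DFA_SBD_DBG0);
+ */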
+
+
+/**
+ * cvmx_dfa_rodt_comp_ctl
+ *
+ * DFA_RODT_COMP_CTL = DFA RLD Compensation control (For read "on die termination")
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_rodt_comp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t enable : 1; /**< Read On Die Termination Enable
+ (0=disable, 1=enable) */
+ uint64_t reserved_12_15 : 4;
+ uint64_t nctl : 4; /**< Compensation control bits */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5; /**< Compensation control bits */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 4;
+ uint64_t reserved_12_15 : 4;
+ uint64_t enable : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_dfa_rodt_comp_ctl_s cn58xx;
+ struct cvmx_dfa_rodt_comp_ctl_s cn58xxp1;
+} cvmx_dfa_rodt_comp_ctl_t;
+
+
+/**
+ * cvmx_dfa_sbd_dbg0
+ *
+ * DFA_SBD_DBG0 = DFA Scoreboard Debug \#0 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t sbd0 : 64; /**< DFA ScoreBoard \#0 Data
+ For internal use only! (DFA Scoreboard Debug)
+ [63:40] rptr[26:3]: Result Base Pointer
+ [39:24] rwcnt[15:0] Cumulative Result Write Counter
+ [23] lastgrdrsp: Last Gather-Rd Response
+ [22] wtgrdrsp: Waiting Gather-Rd Response
+ [21] wtgrdreq: Waiting for Gather-Rd Issue
+ [20] glvld: GLPTR/GLCNT Valid
+ [19] cmpmark: Completion Marked Node Detected
+ [18:17] cmpcode[1:0]: Completion Code
+ [0=PDGONE/1=PERR/2=RFULL/3=TERM]
+ [16] cmpdet: Completion Detected
+ [15] wthdrwrcmtrsp: Waiting for HDR RWrCmtRsp
+ [14] wtlastwrcmtrsp: Waiting for LAST RESULT
+ RWrCmtRsp
+ [13] hdrwrreq: Waiting for HDR RWrReq
+ [12] wtrwrreq: Waiting for RWrReq
+ [11] wtwqwrreq: Waiting for WQWrReq issue
+ [10] lastprdrspeot: Last Packet-Rd Response EOT
+ [9] lastprdrsp: Last Packet-Rd Response
+ [8] wtprdrsp: Waiting for PRdRsp
+ [7] wtprdreq: Waiting for PRdReq Issue
+ [6] lastpdvld: PDPTR/PDLEN Valid
+ [5] pdvld: Packet Data Valid
+ [4] wqvld: WQVLD
+ [3] wqdone: WorkQueue Done condition
+ a) WQWrReq issued(for WQPTR<>0) OR
+ b) HDR RWrCmtRsp completed)
+ [2] rwstf: Resultant write STF/P Mode
+ [1] pdldt: Packet-Data LDT mode
+ [0] gmode: Gather-Mode */
+#else
+ uint64_t sbd0 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg0_s cn31xx;
+ struct cvmx_dfa_sbd_dbg0_s cn38xx;
+ struct cvmx_dfa_sbd_dbg0_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg0_s cn58xx;
+ struct cvmx_dfa_sbd_dbg0_s cn58xxp1;
+} cvmx_dfa_sbd_dbg0_t;
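+
+/*
+ * The SBD0 bit map above can be unpacked with plain shifts and masks, e.g.
+ * (a sketch; 'sbd0' as read via the snapshot procedure shown earlier):
+ *
+ *   uint64_t rptr  = (sbd0 >> 40) & 0xffffff; // [63:40] rptr[26:3]
+ *   uint64_t rwcnt = (sbd0 >> 24) & 0xffff;   // [39:24] rwcnt[15:0]
+ *   unsigned code  = (sbd0 >> 17) & 0x3;      // [18:17] completion code
+ *   unsigned gmode = sbd0 & 0x1;              // [0] gather-mode
+ */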
+
+
+/**
+ * cvmx_dfa_sbd_dbg1
+ *
+ * DFA_SBD_DBG1 = DFA Scoreboard Debug \#1 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t sbd1 : 64; /**< DFA ScoreBoard \#1 Data
+ For internal use only! (DFA Scoreboard Debug)
+ [63:61] wqptr[35:33]: Work Queue Pointer
+ [60:52] rptr[35:27]: Result Base Pointer
+ [51:16] pdptr[35:0]: Packet Data Pointer
+ [15:0] pdcnt[15:0]: Packet Data Counter */
+#else
+ uint64_t sbd1 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg1_s cn31xx;
+ struct cvmx_dfa_sbd_dbg1_s cn38xx;
+ struct cvmx_dfa_sbd_dbg1_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg1_s cn58xx;
+ struct cvmx_dfa_sbd_dbg1_s cn58xxp1;
+} cvmx_dfa_sbd_dbg1_t;
+
+
+/**
+ * cvmx_dfa_sbd_dbg2
+ *
+ * DFA_SBD_DBG2 = DFA Scoreboard Debug \#2 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t sbd2 : 64; /**< DFA ScoreBoard \#2 Data
+ [63:49] wqptr[17:3]: Work Queue Pointer
+ [48:16] rwptr[35:3]: Result Write Pointer
+ [15:0] prwcnt[15:0]: Pending Result Write Counter */
+#else
+ uint64_t sbd2 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg2_s cn31xx;
+ struct cvmx_dfa_sbd_dbg2_s cn38xx;
+ struct cvmx_dfa_sbd_dbg2_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg2_s cn58xx;
+ struct cvmx_dfa_sbd_dbg2_s cn58xxp1;
+} cvmx_dfa_sbd_dbg2_t;
+
+
+/**
+ * cvmx_dfa_sbd_dbg3
+ *
+ * DFA_SBD_DBG3 = DFA Scoreboard Debug \#3 Register
+ *
+ * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down.
+ * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the
+ * CSR read.
+ * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model)
+ * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an
+ * instruction.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_dfa_sbd_dbg3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t sbd3 : 64; /**< DFA ScoreBoard \#3 Data
+ [63:49] wqptr[32:18]: Work Queue Pointer
+ [48:16] glptr[35:3]: Gather List Pointer
+ [15:0] glcnt[15:0]: Gather List Counter */
+#else
+ uint64_t sbd3 : 64;
+#endif
+ } s;
+ struct cvmx_dfa_sbd_dbg3_s cn31xx;
+ struct cvmx_dfa_sbd_dbg3_s cn38xx;
+ struct cvmx_dfa_sbd_dbg3_s cn38xxp2;
+ struct cvmx_dfa_sbd_dbg3_s cn58xx;
+ struct cvmx_dfa_sbd_dbg3_s cn58xxp1;
+} cvmx_dfa_sbd_dbg3_t;
+
+
+/**
+ * cvmx_fpa_bist_status
+ *
+ * FPA_BIST_STATUS = BIST Status of FPA Memories
+ *
+ * The result of the BIST run on the FPA memories.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t frd : 1; /**< fpa_frd memory bist status. */
+ uint64_t fpf0 : 1; /**< fpa_fpf0 memory bist status. */
+ uint64_t fpf1 : 1; /**< fpa_fpf1 memory bist status. */
+ uint64_t ffr : 1; /**< fpa_ffr memory bist status. */
+ uint64_t fdr : 1; /**< fpa_fdr memory bist status. */
+#else
+ uint64_t fdr : 1;
+ uint64_t ffr : 1;
+ uint64_t fpf1 : 1;
+ uint64_t fpf0 : 1;
+ uint64_t frd : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_fpa_bist_status_s cn30xx;
+ struct cvmx_fpa_bist_status_s cn31xx;
+ struct cvmx_fpa_bist_status_s cn38xx;
+ struct cvmx_fpa_bist_status_s cn38xxp2;
+ struct cvmx_fpa_bist_status_s cn50xx;
+ struct cvmx_fpa_bist_status_s cn52xx;
+ struct cvmx_fpa_bist_status_s cn52xxp1;
+ struct cvmx_fpa_bist_status_s cn56xx;
+ struct cvmx_fpa_bist_status_s cn56xxp1;
+ struct cvmx_fpa_bist_status_s cn58xx;
+ struct cvmx_fpa_bist_status_s cn58xxp1;
+} cvmx_fpa_bist_status_t;
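+
+/*
+ * Any bit set above indicates a BIST failure in the corresponding FPA
+ * memory. A quick post-reset check (sketch; assumes the
+ * CVMX_FPA_BIST_STATUS address macro; report_fpa_bist_failure() is a
+ * hypothetical error handler):
+ *
+ *   cvmx_fpa_bist_status_t bist;
+ *   bist.u64 = cvmx_read_csr(CVMX_FPA_BIST_STATUS);
+ *   if (bist.u64 & 0x1f)              // frd/fpf0/fpf1/ffr/fdr
+ *       report_fpa_bist_failure(bist.u64);
+ */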
+
+
+/**
+ * cvmx_fpa_ctl_status
+ *
+ * FPA_CTL_STATUS = FPA's Control/Status Register
+ *
+ * The FPA's interrupt enable register.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_ctl_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t reset : 1; /**< When set, causes a reset of the FPA, with the
+ exception of the RSL. */
+ uint64_t use_ldt : 1; /**< When clear ('0'), the FPA will use LDT to load
+ pointers from the L2C. */
+ uint64_t use_stt : 1; /**< When clear ('0'), the FPA will use STT to store
+ pointers to the L2C. */
+ uint64_t enb : 1; /**< Must be set to 1 AFTER writing all config registers
+ and 10 cycles have passed. If any of the config
+ registers are written after writing this bit, the
+ FPA may begin to operate incorrectly. */
+ uint64_t mem1_err : 7; /**< Causes a flip of the associated ECC bit (bits
+ 38:32 correspond respectively to bits 6:0 of
+ this field) for FPF FIFO 1. */
+ uint64_t mem0_err : 7; /**< Causes a flip of the associated ECC bit (bits
+ 38:32 correspond respectively to bits 6:0 of
+ this field) for FPF FIFO 0. */
+#else
+ uint64_t mem0_err : 7;
+ uint64_t mem1_err : 7;
+ uint64_t enb : 1;
+ uint64_t use_stt : 1;
+ uint64_t use_ldt : 1;
+ uint64_t reset : 1;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_fpa_ctl_status_s cn30xx;
+ struct cvmx_fpa_ctl_status_s cn31xx;
+ struct cvmx_fpa_ctl_status_s cn38xx;
+ struct cvmx_fpa_ctl_status_s cn38xxp2;
+ struct cvmx_fpa_ctl_status_s cn50xx;
+ struct cvmx_fpa_ctl_status_s cn52xx;
+ struct cvmx_fpa_ctl_status_s cn52xxp1;
+ struct cvmx_fpa_ctl_status_s cn56xx;
+ struct cvmx_fpa_ctl_status_s cn56xxp1;
+ struct cvmx_fpa_ctl_status_s cn58xx;
+ struct cvmx_fpa_ctl_status_s cn58xxp1;
+} cvmx_fpa_ctl_status_t;
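+
+/*
+ * Bring-up order implied by the ENB note above: program every FPA config
+ * register first, wait at least 10 cycles, and set ENB last. A sketch,
+ * assuming the CVMX_FPA_CTL_STATUS address macro and cvmx_wait():
+ *
+ *   // ...program FPA_FPF*_SIZE, FPA_FPF*_MARKS, etc. first...
+ *   cvmx_wait(10);
+ *   cvmx_fpa_ctl_status_t ctl;
+ *   ctl.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ *   ctl.s.enb = 1;                    // must be the final config write
+ *   cvmx_write_csr(CVMX_FPA_CTL_STATUS, ctl.u64);
+ */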
+
+
+/**
+ * cvmx_fpa_fpf#_marks
+ *
+ * FPA_FPF1_MARKS = FPA's Queue 1 Free Page FIFO Read Write Marks
+ *
+ * The high and low watermark register that determines when we write and read free pages from L2C
+ * for Queue 1. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values
+ * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75).
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_marks_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_22_63 : 42;
+ uint64_t fpf_wr : 11; /**< When the number of free-page-pointers in a
+ queue exceeds this value the FPA will write
+ 32-page-pointers of that queue to DRAM.
+ The MAX value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-2. */
+ uint64_t fpf_rd : 11; /**< When the number of free-page-pointers in a
+ queue drops below this value and there are
+ free-page-pointers in DRAM, the FPA will
+ read one page (32 pointers) from DRAM.
+ The maximum value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-34. The minimum
+ value is 16. */
+#else
+ uint64_t fpf_rd : 11;
+ uint64_t fpf_wr : 11;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_fpa_fpfx_marks_s cn38xx;
+ struct cvmx_fpa_fpfx_marks_s cn38xxp2;
+ struct cvmx_fpa_fpfx_marks_s cn56xx;
+ struct cvmx_fpa_fpfx_marks_s cn56xxp1;
+ struct cvmx_fpa_fpfx_marks_s cn58xx;
+ struct cvmx_fpa_fpfx_marks_s cn58xxp1;
+} cvmx_fpa_fpfx_marks_t;
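+
+/*
+ * The recommended marks are simple fractions of the queue's FIFO size. A
+ * helper matching the guidance above (names are hypothetical; the caller
+ * still owns the >=33 separation and the per-field maxima):
+ *
+ *   static void fpa_fpf_marks(unsigned fpf_siz, unsigned *rd, unsigned *wr)
+ *   {
+ *       *rd = fpf_siz / 4;            // ~25% of FPF_SIZ
+ *       *wr = (fpf_siz * 3) / 4;      // ~75% of FPF_SIZ
+ *   }
+ */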
+
+
+/**
+ * cvmx_fpa_fpf#_size
+ *
+ * FPA_FPFX_SIZE = FPA's Queue 1-7 Free Page FIFO Size
+ *
+ * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
+ * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
+ * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_size_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t fpf_siz : 11; /**< The number of entries assigned in the FPA FIFO
+ (used to hold page-pointers) for this Queue.
+ The value of this register must be divisible by 2,
+ and the FPA will ignore bit [0] of this register.
+ The total of the FPF_SIZ field of the 8 (0-7)
+ FPA_FPF#_SIZE registers must not exceed 2048.
+ After writing this field the FPA will need 10
+ core clock cycles to be ready for operation. The
+ assignment of location in the FPA FIFO must
+ start with Queue 0, then 1, 2, etc.
+ The number of usable entries will be FPF_SIZ-2. */
+#else
+ uint64_t fpf_siz : 11;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_fpa_fpfx_size_s cn38xx;
+ struct cvmx_fpa_fpfx_size_s cn38xxp2;
+ struct cvmx_fpa_fpfx_size_s cn56xx;
+ struct cvmx_fpa_fpfx_size_s cn56xxp1;
+ struct cvmx_fpa_fpfx_size_s cn58xx;
+ struct cvmx_fpa_fpfx_size_s cn58xxp1;
+} cvmx_fpa_fpfx_size_t;
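+
+/*
+ * Queue sizing rules above in sketch form: even sizes, assigned in queue
+ * order starting with Queue 0, total at most 2048. fpa_write_fpf_size() is
+ * a hypothetical wrapper around the per-queue FPA_FPF#_SIZE CSRs:
+ *
+ *   static const unsigned fpf_siz[8] = {512, 256, 256, 256, 256, 256, 128, 128};
+ *   unsigned q, total = 0;
+ *   for (q = 0; q < 8; q++) {
+ *       total += fpf_siz[q];          // running sum must stay <= 2048
+ *       fpa_write_fpf_size(q, fpf_siz[q] & ~1u); // bit[0] is ignored anyway
+ *   }
+ */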
+
+
+/**
+ * cvmx_fpa_fpf0_marks
+ *
+ * FPA_FPF0_MARKS = FPA's Queue 0 Free Page FIFO Read Write Marks
+ *
+ * The high and low watermark register that determines when we write and read free pages from L2C
+ * for Queue 0. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values
+ * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75).
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_marks_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_24_63 : 40;
+ uint64_t fpf_wr : 12; /**< When the number of free-page-pointers in a
+ queue exceeds this value, the FPA will write
+ 32 page-pointers of that queue to DRAM.
+ The maximum value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-2. */
+ uint64_t fpf_rd : 12; /**< When the number of free-page-pointers in a
+ queue drops below this value and there are
+ free-page-pointers in DRAM, the FPA will
+ read one page (32 pointers) from DRAM.
+ The maximum value for this field should be
+ FPA_FPF0_SIZE[FPF_SIZ]-34. The minimum value
+ for this field is 16. */
+#else
+ uint64_t fpf_rd : 12;
+ uint64_t fpf_wr : 12;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+ struct cvmx_fpa_fpf0_marks_s cn38xx;
+ struct cvmx_fpa_fpf0_marks_s cn38xxp2;
+ struct cvmx_fpa_fpf0_marks_s cn56xx;
+ struct cvmx_fpa_fpf0_marks_s cn56xxp1;
+ struct cvmx_fpa_fpf0_marks_s cn58xx;
+ struct cvmx_fpa_fpf0_marks_s cn58xxp1;
+} cvmx_fpa_fpf0_marks_t;
+
+
+/**
+ * cvmx_fpa_fpf0_size
+ *
+ * FPA_FPF0_SIZE = FPA's Queue 0 Free Page FIFO Size
+ *
+ * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
+ * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
+ * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_size_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t fpf_siz : 12; /**< The number of entries assigned in the FPA FIFO
+ (used to hold page-pointers) for this Queue.
+ The value of this register must be divisible by 2,
+ and the FPA will ignore bit [0] of this register.
+ The total of the FPF_SIZ field of the 8 (0-7)
+ FPA_FPF#_SIZE registers must not exceed 2048.
+ After writing this field the FPA will need 10
+ core clock cycles to be ready for operation. The
+ assignment of location in the FPA FIFO must
+ start with Queue 0, then 1, 2, etc.
+ The number of usable entries will be FPF_SIZ-2. */
+#else
+ uint64_t fpf_siz : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_fpa_fpf0_size_s cn38xx;
+ struct cvmx_fpa_fpf0_size_s cn38xxp2;
+ struct cvmx_fpa_fpf0_size_s cn56xx;
+ struct cvmx_fpa_fpf0_size_s cn56xxp1;
+ struct cvmx_fpa_fpf0_size_s cn58xx;
+ struct cvmx_fpa_fpf0_size_s cn58xxp1;
+} cvmx_fpa_fpf0_size_t;
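+
+
+/*
+ * A minimal sketch of assigning FIFO space to the queues, honoring the
+ * constraints above: sizes must be even, assignment starts at Queue 0, and
+ * the sum across all 8 queues must not exceed 2048. Queue 0 has its own
+ * FPA_FPF0_SIZE register with a wider field. The CVMX_FPA_FPF0_SIZE and
+ * CVMX_FPA_FPFX_SIZE() address macro names are assumptions; see cvmx-csr.h.
+ *
+ * @verbatim
+ * static void fpa_size_queues(void)
+ * {
+ *     // Even sizes, assigned in order from Queue 0; total must be <= 2048
+ *     static const int sizes[8] = {512, 512, 512, 512, 0, 0, 0, 0};
+ *     cvmx_fpa_fpf0_size_t size0;
+ *     cvmx_fpa_fpfx_size_t size;
+ *     int q;
+ *
+ *     size0.u64 = 0;
+ *     size0.s.fpf_siz = sizes[0];
+ *     cvmx_write_csr(CVMX_FPA_FPF0_SIZE, size0.u64);
+ *     for (q = 1; q < 8; q++) {
+ *         size.u64 = 0;
+ *         size.s.fpf_siz = sizes[q];
+ *         cvmx_write_csr(CVMX_FPA_FPFX_SIZE(q), size.u64);
+ *     }
+ *     // Allow ~10 core clock cycles after the writes before using the FPA
+ * }
+ * @endverbatim
+ */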
+
+
+/**
+ * cvmx_fpa_int_enb
+ *
+ * FPA_INT_ENB = FPA's Interrupt Enable
+ *
+ * The FPA's interrupt enable register.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_int_enb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_28_63 : 36;
+ uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
+ register is asserted the FPA will assert an
+ interrupt. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_fpa_int_enb_s cn30xx;
+ struct cvmx_fpa_int_enb_s cn31xx;
+ struct cvmx_fpa_int_enb_s cn38xx;
+ struct cvmx_fpa_int_enb_s cn38xxp2;
+ struct cvmx_fpa_int_enb_s cn50xx;
+ struct cvmx_fpa_int_enb_s cn52xx;
+ struct cvmx_fpa_int_enb_s cn52xxp1;
+ struct cvmx_fpa_int_enb_s cn56xx;
+ struct cvmx_fpa_int_enb_s cn56xxp1;
+ struct cvmx_fpa_int_enb_s cn58xx;
+ struct cvmx_fpa_int_enb_s cn58xxp1;
+} cvmx_fpa_int_enb_t;
+
+
+/**
+ * cvmx_fpa_int_sum
+ *
+ * FPA_INT_SUM = FPA's Interrupt Summary Register
+ *
+ * Contains the different interrupt summary bits of the FPA.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_int_sum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_28_63 : 36;
+ uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes
+ negative. */
+ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes
+ negative. */
+ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes
+ negative. */
+ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes
+ negative. */
+ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes
+ negative. */
+ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes
+ negative. */
+ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes
+ negative. */
+ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
+ the L2C does not have the FPA ownership bit set. */
+ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
+ the count available is greater than the pointers
+ present in the FPA. */
+ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
+ negative. */
+ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
+ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
+ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
+ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
+#else
+ uint64_t fed0_sbe : 1;
+ uint64_t fed0_dbe : 1;
+ uint64_t fed1_sbe : 1;
+ uint64_t fed1_dbe : 1;
+ uint64_t q0_und : 1;
+ uint64_t q0_coff : 1;
+ uint64_t q0_perr : 1;
+ uint64_t q1_und : 1;
+ uint64_t q1_coff : 1;
+ uint64_t q1_perr : 1;
+ uint64_t q2_und : 1;
+ uint64_t q2_coff : 1;
+ uint64_t q2_perr : 1;
+ uint64_t q3_und : 1;
+ uint64_t q3_coff : 1;
+ uint64_t q3_perr : 1;
+ uint64_t q4_und : 1;
+ uint64_t q4_coff : 1;
+ uint64_t q4_perr : 1;
+ uint64_t q5_und : 1;
+ uint64_t q5_coff : 1;
+ uint64_t q5_perr : 1;
+ uint64_t q6_und : 1;
+ uint64_t q6_coff : 1;
+ uint64_t q6_perr : 1;
+ uint64_t q7_und : 1;
+ uint64_t q7_coff : 1;
+ uint64_t q7_perr : 1;
+ uint64_t reserved_28_63 : 36;
+#endif
+ } s;
+ struct cvmx_fpa_int_sum_s cn30xx;
+ struct cvmx_fpa_int_sum_s cn31xx;
+ struct cvmx_fpa_int_sum_s cn38xx;
+ struct cvmx_fpa_int_sum_s cn38xxp2;
+ struct cvmx_fpa_int_sum_s cn50xx;
+ struct cvmx_fpa_int_sum_s cn52xx;
+ struct cvmx_fpa_int_sum_s cn52xxp1;
+ struct cvmx_fpa_int_sum_s cn56xx;
+ struct cvmx_fpa_int_sum_s cn56xxp1;
+ struct cvmx_fpa_int_sum_s cn58xx;
+ struct cvmx_fpa_int_sum_s cn58xxp1;
+} cvmx_fpa_int_sum_t;
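+
+
+/*
+ * A minimal sketch of pairing FPA_INT_ENB with FPA_INT_SUM: enable the error
+ * interrupts of interest, then service them by reading the summary and
+ * writing the handled bits back (assuming the usual write-1-to-clear CSR
+ * behavior). The cvmx_read_csr()/cvmx_write_csr() accessors and the
+ * CVMX_FPA_INT_ENB/CVMX_FPA_INT_SUM address macro names are assumptions;
+ * see cvmx-csr.h.
+ *
+ * @verbatim
+ * static void fpa_enable_and_service_irqs(void)
+ * {
+ *     cvmx_fpa_int_enb_t enb;
+ *     cvmx_fpa_int_sum_t sum;
+ *
+ *     enb.u64 = 0;
+ *     enb.s.fed0_sbe = 1;  // single-bit ECC error in FPF0
+ *     enb.s.fed0_dbe = 1;  // double-bit ECC error in FPF0
+ *     enb.s.q0_und = 1;    // Queue 0 page count went negative
+ *     cvmx_write_csr(CVMX_FPA_INT_ENB, enb.u64);
+ *
+ *     sum.u64 = cvmx_read_csr(CVMX_FPA_INT_SUM);
+ *     if (sum.s.q0_und) {
+ *         // handle the Queue 0 underflow here
+ *     }
+ *     cvmx_write_csr(CVMX_FPA_INT_SUM, sum.u64);  // clear the bits just seen
+ * }
+ * @endverbatim
+ */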
+
+
+/**
+ * cvmx_fpa_que#_available
+ *
+ * FPA_QUEX_PAGES_AVAILABLE = FPA's Queue 0-7 Free Page Available Register
+ *
+ * The number of page pointers that are available in the FPA and local DRAM.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_quex_available_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t que_siz : 29; /**< The number of free pages available in this Queue. */
+#else
+ uint64_t que_siz : 29;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_fpa_quex_available_s cn30xx;
+ struct cvmx_fpa_quex_available_s cn31xx;
+ struct cvmx_fpa_quex_available_s cn38xx;
+ struct cvmx_fpa_quex_available_s cn38xxp2;
+ struct cvmx_fpa_quex_available_s cn50xx;
+ struct cvmx_fpa_quex_available_s cn52xx;
+ struct cvmx_fpa_quex_available_s cn52xxp1;
+ struct cvmx_fpa_quex_available_s cn56xx;
+ struct cvmx_fpa_quex_available_s cn56xxp1;
+ struct cvmx_fpa_quex_available_s cn58xx;
+ struct cvmx_fpa_quex_available_s cn58xxp1;
+} cvmx_fpa_quex_available_t;
+
+
+/**
+ * cvmx_fpa_que#_page_index
+ *
+ * FPA_QUEX_PAGE_INDEX = FPA's Queue 0-7 Page Index
+ *
+ * The current page index for this queue of the FPA.
+ * This number reflects the number of pages of pointers that have been written to memory
+ * for this queue.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_quex_page_index_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_25_63 : 39;
+ uint64_t pg_num : 25; /**< Page number. */
+#else
+ uint64_t pg_num : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_fpa_quex_page_index_s cn30xx;
+ struct cvmx_fpa_quex_page_index_s cn31xx;
+ struct cvmx_fpa_quex_page_index_s cn38xx;
+ struct cvmx_fpa_quex_page_index_s cn38xxp2;
+ struct cvmx_fpa_quex_page_index_s cn50xx;
+ struct cvmx_fpa_quex_page_index_s cn52xx;
+ struct cvmx_fpa_quex_page_index_s cn52xxp1;
+ struct cvmx_fpa_quex_page_index_s cn56xx;
+ struct cvmx_fpa_quex_page_index_s cn56xxp1;
+ struct cvmx_fpa_quex_page_index_s cn58xx;
+ struct cvmx_fpa_quex_page_index_s cn58xxp1;
+} cvmx_fpa_quex_page_index_t;
+
+
+/**
+ * cvmx_fpa_que_act
+ *
+ * FPA_QUE_ACT = FPA's Queue# Actual Page Index
+ *
+ * When an INT_SUM[PERR#] error occurs this register will be latched with the value read from L2C.
+ * This is latched on the first error and will not latch again until all errors are cleared.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_que_act_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t act_que : 3; /**< FPA-queue-number read from memory. */
+ uint64_t act_indx : 26; /**< Page number read from memory. */
+#else
+ uint64_t act_indx : 26;
+ uint64_t act_que : 3;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_fpa_que_act_s cn30xx;
+ struct cvmx_fpa_que_act_s cn31xx;
+ struct cvmx_fpa_que_act_s cn38xx;
+ struct cvmx_fpa_que_act_s cn38xxp2;
+ struct cvmx_fpa_que_act_s cn50xx;
+ struct cvmx_fpa_que_act_s cn52xx;
+ struct cvmx_fpa_que_act_s cn52xxp1;
+ struct cvmx_fpa_que_act_s cn56xx;
+ struct cvmx_fpa_que_act_s cn56xxp1;
+ struct cvmx_fpa_que_act_s cn58xx;
+ struct cvmx_fpa_que_act_s cn58xxp1;
+} cvmx_fpa_que_act_t;
+
+
+/**
+ * cvmx_fpa_que_exp
+ *
+ * FPA_QUE_EXP = FPA's Queue# Expected Page Index
+ *
+ * When an INT_SUM[PERR#] error occurs this register will be latched with the expected value.
+ * This is latched on the first error and will not latch again until all errors are cleared.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_que_exp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t exp_que : 3; /**< Expected fpa-queue-number read from memory. */
+ uint64_t exp_indx : 26; /**< Expected page number read from memory. */
+#else
+ uint64_t exp_indx : 26;
+ uint64_t exp_que : 3;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_fpa_que_exp_s cn30xx;
+ struct cvmx_fpa_que_exp_s cn31xx;
+ struct cvmx_fpa_que_exp_s cn38xx;
+ struct cvmx_fpa_que_exp_s cn38xxp2;
+ struct cvmx_fpa_que_exp_s cn50xx;
+ struct cvmx_fpa_que_exp_s cn52xx;
+ struct cvmx_fpa_que_exp_s cn52xxp1;
+ struct cvmx_fpa_que_exp_s cn56xx;
+ struct cvmx_fpa_que_exp_s cn56xxp1;
+ struct cvmx_fpa_que_exp_s cn58xx;
+ struct cvmx_fpa_que_exp_s cn58xxp1;
+} cvmx_fpa_que_exp_t;
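+
+
+/*
+ * A minimal sketch of diagnosing an INT_SUM[PERR#] event by comparing the
+ * expected and actual queue/page-index values latched by the hardware.
+ * printf() comes from <stdio.h>; the CVMX_FPA_QUE_EXP and CVMX_FPA_QUE_ACT
+ * address macro names are assumptions; see cvmx-csr.h.
+ *
+ * @verbatim
+ * static void fpa_report_perr(void)
+ * {
+ *     cvmx_fpa_que_exp_t exp;
+ *     cvmx_fpa_que_act_t act;
+ *
+ *     exp.u64 = cvmx_read_csr(CVMX_FPA_QUE_EXP);
+ *     act.u64 = cvmx_read_csr(CVMX_FPA_QUE_ACT);
+ *     if (exp.s.exp_que != act.s.act_que || exp.s.exp_indx != act.s.act_indx)
+ *         printf("FPA PERR: expected q%llu/idx%llu, got q%llu/idx%llu\n",
+ *                (unsigned long long)exp.s.exp_que,
+ *                (unsigned long long)exp.s.exp_indx,
+ *                (unsigned long long)act.s.act_que,
+ *                (unsigned long long)act.s.act_indx);
+ * }
+ * @endverbatim
+ */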
+
+
+/**
+ * cvmx_fpa_wart_ctl
+ *
+ * FPA_WART_CTL = FPA's WART Control
+ *
+ * Control and status for the WART block.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_wart_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t ctl : 16; /**< Control information. */
+#else
+ uint64_t ctl : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_fpa_wart_ctl_s cn30xx;
+ struct cvmx_fpa_wart_ctl_s cn31xx;
+ struct cvmx_fpa_wart_ctl_s cn38xx;
+ struct cvmx_fpa_wart_ctl_s cn38xxp2;
+ struct cvmx_fpa_wart_ctl_s cn50xx;
+ struct cvmx_fpa_wart_ctl_s cn52xx;
+ struct cvmx_fpa_wart_ctl_s cn52xxp1;
+ struct cvmx_fpa_wart_ctl_s cn56xx;
+ struct cvmx_fpa_wart_ctl_s cn56xxp1;
+ struct cvmx_fpa_wart_ctl_s cn58xx;
+ struct cvmx_fpa_wart_ctl_s cn58xxp1;
+} cvmx_fpa_wart_ctl_t;
+
+
+/**
+ * cvmx_fpa_wart_status
+ *
+ * FPA_WART_STATUS = FPA's WART Status
+ *
+ * Control and status for the WART block.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_fpa_wart_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t status : 32; /**< Status information. */
+#else
+ uint64_t status : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_fpa_wart_status_s cn30xx;
+ struct cvmx_fpa_wart_status_s cn31xx;
+ struct cvmx_fpa_wart_status_s cn38xx;
+ struct cvmx_fpa_wart_status_s cn38xxp2;
+ struct cvmx_fpa_wart_status_s cn50xx;
+ struct cvmx_fpa_wart_status_s cn52xx;
+ struct cvmx_fpa_wart_status_s cn52xxp1;
+ struct cvmx_fpa_wart_status_s cn56xx;
+ struct cvmx_fpa_wart_status_s cn56xxp1;
+ struct cvmx_fpa_wart_status_s cn58xx;
+ struct cvmx_fpa_wart_status_s cn58xxp1;
+} cvmx_fpa_wart_status_t;
+
+
+/**
+ * cvmx_gmx#_bad_reg
+ *
+ * GMX_BAD_REG = A collection of things that have gone very, very wrong
+ *
+ *
+ * Notes:
+ * In XAUI mode, only the LSB (corresponding to port0) of INB_NXA, LOSTSTAT, and OUT_OVR is used.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_bad_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t loststat : 4; /**< TX Statistics data was over-written (per RGM port)
+ TX Stats are corrupted */
+ uint64_t reserved_18_21 : 4;
+ uint64_t out_ovr : 16; /**< Outbound data FIFO overflow (per port) */
+ uint64_t ncb_ovr : 1; /**< Outbound NCB FIFO Overflow */
+ uint64_t out_col : 1; /**< Outbound collision occurred between PKO and NCB */
+#else
+ uint64_t out_col : 1;
+ uint64_t ncb_ovr : 1;
+ uint64_t out_ovr : 16;
+ uint64_t reserved_18_21 : 4;
+ uint64_t loststat : 4;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } s;
+ struct cvmx_gmxx_bad_reg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_25_25 : 1;
+ uint64_t loststat : 3; /**< TX Statistics data was over-written (per RGM port)
+ TX Stats are corrupted */
+ uint64_t reserved_5_21 : 17;
+ uint64_t out_ovr : 3; /**< Outbound data FIFO overflow (per port) */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 3;
+ uint64_t reserved_5_21 : 17;
+ uint64_t loststat : 3;
+ uint64_t reserved_25_25 : 1;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_bad_reg_s cn38xx;
+ struct cvmx_gmxx_bad_reg_s cn38xxp2;
+ struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
+ struct cvmx_gmxx_bad_reg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_31_63 : 33;
+ uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
+ uint64_t statovr : 1; /**< TX Statistics overflow
+ The common FIFO to SGMII and XAUI had an overflow
+ TX Stats are corrupted */
+ uint64_t loststat : 4; /**< TX Statistics data was over-written
+ In SGMII, one bit per port
+ In XAUI, only port0 is used
+ TX Stats are corrupted */
+ uint64_t reserved_6_21 : 16;
+ uint64_t out_ovr : 4; /**< Outbound data FIFO overflow (per port) */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 4;
+ uint64_t reserved_6_21 : 16;
+ uint64_t loststat : 4;
+ uint64_t statovr : 1;
+ uint64_t inb_nxa : 4;
+ uint64_t reserved_31_63 : 33;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bad_reg_s cn58xx;
+ struct cvmx_gmxx_bad_reg_s cn58xxp1;
+} cvmx_gmxx_bad_reg_t;
+
+
+/**
+ * cvmx_gmx#_bist
+ *
+ * GMX_BIST = GMX BIST Results
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t status : 17; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.outb.fif.fif_bnk0
+ - 5: gmx#.outb.fif.fif_bnk1
+ - 6: gmx#.outb.fif.fif_bnk2
+ - 7: gmx#.outb.fif.fif_bnk3
+ - 8: gmx#.csr.gmi0.srf8x64m1_bist
+ - 9: gmx#.csr.gmi1.srf8x64m1_bist
+ - 10: gmx#.csr.gmi2.srf8x64m1_bist
+ - 11: gmx#.csr.gmi3.srf8x64m1_bist
+ - 12: gmx#.csr.drf20x80m1_bist
+ - 13: gmx#.outb.stat.drf16x27m1_bist
+ - 14: gmx#.outb.stat.drf40x64m1_bist
+ - 15: gmx#.outb.ncb.drf16x76m1_bist
+ - 16: gmx#.outb.fif.srf32x16m2_bist */
+#else
+ uint64_t status : 17;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_gmxx_bist_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t status : 10; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.dpr512x78m4_bist
+ - 1: gmx#.outb.fif.dpr512x71m4_bist
+ - 2: gmx#.csr.gmi0.srf8x64m1_bist
+ - 3: gmx#.csr.gmi1.srf8x64m1_bist
+ - 4: gmx#.csr.gmi2.srf8x64m1_bist
+ - 5: 0
+ - 6: gmx#.csr.drf20x80m1_bist
+ - 7: gmx#.outb.stat.drf16x27m1_bist
+ - 8: gmx#.outb.stat.drf40x64m1_bist
+ - 9: 0 */
+#else
+ uint64_t status : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_bist_cn30xx cn31xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xxp2;
+ struct cvmx_gmxx_bist_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t status : 12; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails */
+#else
+ uint64_t status : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_bist_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.outb.fif.fif_bnk0
+ - 5: gmx#.outb.fif.fif_bnk1
+ - 6: gmx#.outb.fif.fif_bnk2
+ - 7: gmx#.outb.fif.fif_bnk3
+ - 8: gmx#.csr.gmi0.srf8x64m1_bist
+ - 9: gmx#.csr.gmi1.srf8x64m1_bist
+ - 10: gmx#.csr.gmi2.srf8x64m1_bist
+ - 11: gmx#.csr.gmi3.srf8x64m1_bist
+ - 12: gmx#.csr.drf20x80m1_bist
+ - 13: gmx#.outb.stat.drf16x27m1_bist
+ - 14: gmx#.outb.stat.drf40x64m1_bist
+ - 15: xgmii.tx.drf16x38m1_async_bist */
+#else
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_bist_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bist_cn52xx cn56xx;
+ struct cvmx_gmxx_bist_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bist_s cn58xx;
+ struct cvmx_gmxx_bist_s cn58xxp1;
+} cvmx_gmxx_bist_t;
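+
+
+/*
+ * A minimal sketch of checking the BIST results after reset: any set bit in
+ * STATUS indicates a failing internal memory. The CVMX_GMXX_BIST(interface)
+ * address macro name is an assumption; see cvmx-csr.h.
+ *
+ * @verbatim
+ * static int gmx_bist_ok(int interface)
+ * {
+ *     cvmx_gmxx_bist_t bist;
+ *
+ *     bist.u64 = cvmx_read_csr(CVMX_GMXX_BIST(interface));
+ *     return bist.s.status == 0;  // nonzero means a memory failed BIST
+ * }
+ * @endverbatim
+ */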
+
+
+/**
+ * cvmx_gmx#_clk_en
+ *
+ * DO NOT DOCUMENT THIS REGISTER - IT IS NOT OFFICIAL
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_clk_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t clk_en : 1; /**< Force the clock enables on */
+#else
+ uint64_t clk_en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_gmxx_clk_en_s cn52xx;
+ struct cvmx_gmxx_clk_en_s cn52xxp1;
+ struct cvmx_gmxx_clk_en_s cn56xx;
+ struct cvmx_gmxx_clk_en_s cn56xxp1;
+} cvmx_gmxx_clk_en_t;
+
+
+/**
+ * cvmx_gmx#_hg2_control
+ *
+ * Notes:
+ * The HiGig2 TX and RX enables would normally both be set for HiGig2 messaging. However,
+ * setting just the TX or RX bit enables only HG2 message transmit or receive capability,
+ * respectively.
+ * When the PHYS_EN and LOGL_EN bits are 1, they allow link pause or back pressure to PKO as
+ * directed by received HiGig2 messages. When 0, link pause and back pressure to PKO in
+ * response to received messages are disabled.
+ *
+ * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN
+ * are set.
+ *
+ * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN
+ * are set.
+ *
+ * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
+ * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when
+ * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1
+ * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages
+ * (optionally, when HG2TX_EN=1) with the HiGig2 protocol.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_hg2_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t hg2tx_en : 1; /**< Enable Transmission of HG2 phys and logl messages
+ When set, also disables HW auto-generated (802.3
+ and CBFC) pause frames. (OCTEON cannot generate
+ proper 802.3 or CBFC pause frames in HiGig2 mode.) */
+ uint64_t hg2rx_en : 1; /**< Enable extraction and processing of HG2 message
+ packet from RX flow. Physical logical pause info
+ is used to pause physical link, back pressure PKO
+ HG2RX_EN must be set when HiGig2 messages are
+ present in the receive stream. */
+ uint64_t phys_en : 1; /**< 1 bit physical link pause enable for received
+ HiGig2 physical pause messages */
+ uint64_t logl_en : 16; /**< 16 bit XOFF enables for received HiGig2 messages
+ or CBFC packets */
+#else
+ uint64_t logl_en : 16;
+ uint64_t phys_en : 1;
+ uint64_t hg2rx_en : 1;
+ uint64_t hg2tx_en : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } s;
+ struct cvmx_gmxx_hg2_control_s cn52xx;
+ struct cvmx_gmxx_hg2_control_s cn52xxp1;
+ struct cvmx_gmxx_hg2_control_s cn56xx;
+} cvmx_gmxx_hg2_control_t;
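+
+
+/*
+ * A minimal sketch of enabling HiGig2 messaging per the notes above: HG_EN
+ * and the 16-byte UDD skip must already be configured before setting
+ * HG2TX_EN/HG2RX_EN. The CVMX_GMXX_HG2_CONTROL(interface) address macro name
+ * is an assumption; see cvmx-csr.h. The GMX_TX_XAUI_CTL and GMX_RX0_UDD_SKP
+ * setup is elided here.
+ *
+ * @verbatim
+ * static void gmx_enable_higig2(int interface)
+ * {
+ *     cvmx_gmxx_hg2_control_t hg2;
+ *
+ *     // Precondition: GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX0_UDD_SKP[LEN]=16
+ *     hg2.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+ *     hg2.s.hg2tx_en = 1;      // transmit HG2 messages
+ *     hg2.s.hg2rx_en = 1;      // extract and process received HG2 messages
+ *     hg2.s.phys_en = 1;       // honor physical link pause
+ *     hg2.s.logl_en = 0xffff;  // honor all 16 logical XOFF channels
+ *     cvmx_write_csr(CVMX_GMXX_HG2_CONTROL(interface), hg2.u64);
+ * }
+ * @endverbatim
+ */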
+
+
+/**
+ * cvmx_gmx#_inf_mode
+ *
+ * GMX_INF_MODE = Interface Mode
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_inf_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t speed : 2; /**< Interface Speed
+ - 0: 1.250GHz
+ - 1: 2.500GHz
+ - 2: 3.125GHz
+ - 3: 3.750GHz */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mode : 2; /**< Interface Electrical Operating Mode
+ - 0: Disabled (PCIe)
+ - 1: XAUI (IEEE 802.3-2005)
+ - 2: SGMII (v1.8)
+ - 3: PICMG3.1 */
+ uint64_t reserved_3_3 : 1;
+ uint64_t p0mii : 1; /**< Port 0 Interface Mode
+ - 0: Port 0 is RGMII
+ - 1: Port 0 is MII */
+ uint64_t en : 1; /**< Interface Enable */
+ uint64_t type : 1; /**< Interface Mode
+ - 0: RGMII Mode
+ - 1: Spi4 Mode */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t p0mii : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t mode : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t speed : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_gmxx_inf_mode_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t p0mii : 1; /**< Port 0 Interface Mode
+ - 0: Port 0 is RGMII
+ - 1: Port 0 is MII */
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Port 1/2 Interface Mode
+ - 0: Ports 1 and 2 are RGMII
+ - 1: Port 1 is GMII/MII, Port 2 is unused
+ GMII/MII is selected by GMX_PRT1_CFG[SPEED] */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t p0mii : 1;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_inf_mode_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Mode
+ - 0: All three ports are RGMII ports
+ - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
+ struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
+ struct cvmx_gmxx_inf_mode_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t speed : 2; /**< Interface Speed
+ - 0: 1.250GHz
+ - 1: 2.500GHz
+ - 2: 3.125GHz
+ - 3: 3.750GHz */
+ uint64_t reserved_6_7 : 2;
+ uint64_t mode : 2; /**< Interface Electrical Operating Mode
+ - 0: Disabled (PCIe)
+ - 1: XAUI (IEEE 802.3-2005)
+ - 2: SGMII (v1.8)
+ - 3: PICMG3.1 */
+ uint64_t reserved_2_3 : 2;
+ uint64_t en : 1; /**< Interface Enable
+ Must be set to enable the packet interface.
+ Should be enabled before any other requests to
+ GMX including enabling port back pressure with
+ IPD_CTL_STATUS[PBP_EN] */
+ uint64_t type : 1; /**< Interface Protocol Type
+ - 0: SGMII/1000Base-X
+ - 1: XAUI */
+#else
+ uint64_t type : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_3 : 2;
+ uint64_t mode : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t speed : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
+} cvmx_gmxx_inf_mode_t;
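+
+
+/*
+ * A minimal sketch of probing the interface mode on cn52xx/cn56xx-style
+ * parts, where EN gates the interface and TYPE picks SGMII vs XAUI. The
+ * CVMX_GMXX_INF_MODE(interface) address macro name is an assumption; see
+ * cvmx-csr.h.
+ *
+ * @verbatim
+ * static const char *gmx_mode_name(int interface)
+ * {
+ *     cvmx_gmxx_inf_mode_t mode;
+ *
+ *     mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ *     if (!mode.cn52xx.en)
+ *         return "disabled";
+ *     return mode.cn52xx.type ? "XAUI" : "SGMII/1000Base-X";
+ * }
+ * @endverbatim
+ */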
+
+
+/**
+ * cvmx_gmx#_nxa_adr
+ *
+ * GMX_NXA_ADR = NXA Port Address
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_nxa_adr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t prt : 6; /**< Logged address for NXA exceptions
+ The logged address will be from the first
+ exception that caused the problem. NCB has
+ higher priority than PKO and will win. */
+#else
+ uint64_t prt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_gmxx_nxa_adr_s cn30xx;
+ struct cvmx_gmxx_nxa_adr_s cn31xx;
+ struct cvmx_gmxx_nxa_adr_s cn38xx;
+ struct cvmx_gmxx_nxa_adr_s cn38xxp2;
+ struct cvmx_gmxx_nxa_adr_s cn50xx;
+ struct cvmx_gmxx_nxa_adr_s cn52xx;
+ struct cvmx_gmxx_nxa_adr_s cn52xxp1;
+ struct cvmx_gmxx_nxa_adr_s cn56xx;
+ struct cvmx_gmxx_nxa_adr_s cn56xxp1;
+ struct cvmx_gmxx_nxa_adr_s cn58xx;
+ struct cvmx_gmxx_nxa_adr_s cn58xxp1;
+} cvmx_gmxx_nxa_adr_t;
+
+
+/**
+ * cvmx_gmx#_prt#_cbfc_ctl
+ *
+ * ** HG2 message CSRs end
+ *
+ *
+ * Notes:
+ * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>)
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t phys_en : 16; /**< Determines which ports will have physical
+ backpressure pause packets.
+ The value placed in the Class Enable Vector
+ field of the CBFC pause packet will be
+ PHYS_EN | LOGL_EN */
+ uint64_t logl_en : 16; /**< Determines which ports will have logical
+ backpressure pause packets.
+ The value placed in the Class Enable Vector
+ field of the CBFC pause packet will be
+ PHYS_EN | LOGL_EN */
+ uint64_t phys_bp : 16; /**< When RX_EN is set and the HW is backpressuring any
+ ports (from either CBFC pause packets or the
+ GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports
+ indicated by PHYS_BP are backpressured, simulate
+ physical backpressure by deferring all packets on
+ the transmitter. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t bck_en : 1; /**< Forward CBFC Pause information to BP block */
+ uint64_t drp_en : 1; /**< Drop Control CBFC Pause Frames */
+ uint64_t tx_en : 1; /**< When set, allow for CBFC Pause Packets
+ Must be clear in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+ uint64_t rx_en : 1; /**< When set, allow for CBFC Pause Packets
+ Must be clear in HiGig2 mode i.e. when
+ GMX_TX_XAUI_CTL[HG_EN]=1 and
+ GMX_RX_UDD_SKP[SKIP]=16. */
+#else
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t drp_en : 1;
+ uint64_t bck_en : 1;
+ uint64_t reserved_4_15 : 12;
+ uint64_t phys_bp : 16;
+ uint64_t logl_en : 16;
+ uint64_t phys_en : 16;
+#endif
+ } s;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
+} cvmx_gmxx_prtx_cbfc_ctl_t;
+
+
+/**
+ * cvmx_gmx#_prt#_cfg
+ *
+ * GMX_PRT_CFG = Port description
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_14_63 : 50;
+ uint64_t tx_idle : 1; /**< TX Machine is idle */
+ uint64_t rx_idle : 1; /**< RX Machine is idle */
+ uint64_t reserved_9_11 : 3;
+ uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit-times (10/100Mbs operation)
+ 1 = 4096 bit-times (1000Mbs operation) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbs operation
+ (GMX_TX_CLK[CLK_CNT] > 1)
+ 1 = 1000Mbs operation */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t reserved_4_7 : 4;
+ uint64_t speed_msb : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rx_idle : 1;
+ uint64_t tx_idle : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bit-times (10/100Mbs operation)
+ 1 = 4096 bit-times (1000Mbs operation) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbs operation
+ (in RGMII mode, GMX_TX_CLK[CLK_CNT] > 1)
+ (in MII mode, GMX_TX_CLK[CLK_CNT] == 1)
+ 1 = 1000Mbs operation */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
+ struct cvmx_gmxx_prtx_cfg_s cn52xx;
+ struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
+ struct cvmx_gmxx_prtx_cfg_s cn56xx;
+ struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
+} cvmx_gmxx_prtx_cfg_t;
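+
+
+/*
+ * A minimal sketch of forcing a port to 1000Mbs full duplex using the
+ * SPEED/SPEED_MSB/SLOTTIME encodings above (cn52xx/cn56xx field layout).
+ * The CVMX_GMXX_PRTX_CFG(port, interface) address macro name is an
+ * assumption; see cvmx-csr.h.
+ *
+ * @verbatim
+ * static void gmx_force_1g_fdx(int port, int interface)
+ * {
+ *     cvmx_gmxx_prtx_cfg_t cfg;
+ *
+ *     cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
+ *     cfg.s.speed_msb = 0;  // [SPEED_MSB:SPEED] = 01 -> 1000Mbs
+ *     cfg.s.speed = 1;
+ *     cfg.s.duplex = 1;     // full duplex
+ *     cfg.s.slottime = 1;   // 4096 bit-times for 1000Mbs
+ *     cfg.s.en = 1;         // enable the link
+ *     cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), cfg.u64);
+ * }
+ * @endverbatim
+ */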
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam0
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam0_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam1
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam1_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam2
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam2_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam3
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam3_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam4
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam4_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam5
+ *
+ * GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to GMX_RX_ADR_CAM will not
+ change the CSR when GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam5_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_cam_en
+ *
+ * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t en : 8; /**< CAM Entry Enables */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_cam_en_t;
+
+
+/**
+ * cvmx_gmx#_rx#_adr_ctl
+ *
+ * GMX_RX_ADR_CTL = Address Filtering Control
+ *
+ *
+ * Notes:
+ * * ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ *
+ * @verbatim
+ * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
+ * ASSERT(prt >= 0 && prt <= 3);
+ * if (is_bcst(dmac)) // broadcast accept
+ * return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
+ * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
+ * return REJECT;
+ * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
+ * return ACCEPT;
+ *
+ * cam_hit = 0;
+ *
+ * for (i=0; i<8; i++) [
+ * if (GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
+ * continue;
+ * uint48 unswizzled_mac_adr = 0x0;
+ * for (j=5; j>=0; j--) [
+ * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
+ * ]
+ * if (unswizzled_mac_adr == dmac) [
+ * cam_hit = 1;
+ * break;
+ * ]
+ * ]
+ *
+ * if (cam_hit)
+ * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
+ * else
+ * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
+ * ]
+ * @endverbatim
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
+ 0 = reject the packet on DMAC address match
+ 1 = accept the packet on DMAC address match */
+ uint64_t mcst : 2; /**< Multicast Mode
+ 0 = Use the Address Filter CAM
+ 1 = Force reject all multicast packets
+ 2 = Force accept all multicast packets
+ 3 = Reserved */
+ uint64_t bcst : 1; /**< Accept All Broadcast Packets */
+#else
+ uint64_t bcst : 1;
+ uint64_t mcst : 2;
+ uint64_t cam_mode : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
+} cvmx_gmxx_rxx_adr_ctl_t;
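+
+
+/*
+ * A minimal sketch of installing one DMAC into a CAM entry, following the
+ * swizzled layout in the pseudo code above: CAM register j holds byte j of
+ * the address (j=0 is the least significant byte) and entry i occupies bits
+ * <i*8+7:i*8> of each register. The port must be disabled (GMX_PRT_CFG[EN]=0)
+ * for the writes to take effect. The CVMX_GMXX_RXX_ADR_CAM0..5 and
+ * CVMX_GMXX_RXX_ADR_CAM_EN address macro names are assumptions; see
+ * cvmx-csr.h.
+ *
+ * @verbatim
+ * static void gmx_set_cam_entry(int index, int interface, int entry,
+ *                               uint64_t dmac) // 48-bit DMAC, first octet in bits <47:40>
+ * {
+ *     uint64_t cam_csr[6] = {
+ *         CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ *         CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ *         CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ *         CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ *         CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ *         CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ *     };
+ *     uint64_t cam, en;
+ *     int j;
+ *
+ *     for (j = 0; j < 6; j++) {
+ *         cam = cvmx_read_csr(cam_csr[j]);
+ *         cam &= ~(0xffull << (entry * 8));                 // clear entry's byte
+ *         cam |= ((dmac >> (8 * j)) & 0xff) << (entry * 8); // insert byte j
+ *         cvmx_write_csr(cam_csr[j], cam);
+ *     }
+ *     en = cvmx_read_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface));
+ *     cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface),
+ *                    en | (1ull << entry));                 // enable the entry
+ * }
+ * @endverbatim
+ */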
+
+
+/**
+ * cvmx_gmx#_rx#_decision
+ *
+ * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
+ *
+ *
+ * Notes:
+ * As each byte in a packet is received by GMX, the L2 byte count is compared
+ * against the GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
+ *
+ * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ * L2 Size
+ * Port Mode <GMX_RX_DECISION bytes (default=24) >=GMX_RX_DECISION bytes (default=24)
+ *
+ * Full Duplex accept packet apply filters
+ * no filtering is applied accept packet based on DMAC and PAUSE packet filters
+ *
+ * Half Duplex drop packet apply filters
+ * packet is unconditionally dropped accept packet based on DMAC
+ *
+ * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_decision_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
+ a packet. */
+#else
+ uint64_t cnt : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_decision_s cn30xx;
+ struct cvmx_gmxx_rxx_decision_s cn31xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xxp2;
+ struct cvmx_gmxx_rxx_decision_s cn50xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn56xx;
+ struct cvmx_gmxx_rxx_decision_s cn56xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn58xx;
+ struct cvmx_gmxx_rxx_decision_s cn58xxp1;
+} cvmx_gmxx_rxx_decision_t;
+
+
+/**
+ * cvmx_gmx#_rx#_frm_chk
+ *
+ * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
+ *
+ *
+ * Notes:
+ * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
+ *
+ * In XAUI mode prt0 is used for checking.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_chk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_chk_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
+} cvmx_gmxx_rxx_frm_chk_t;
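+
+
+/*
+ * A minimal sketch of selecting which receive errors mark a frame bad, using
+ * the common field layout above. The CVMX_GMXX_RXX_FRM_CHK(index, interface)
+ * address macro name is an assumption; see cvmx-csr.h.
+ *
+ * @verbatim
+ * static void gmx_enable_frame_checks(int index, int interface)
+ * {
+ *     cvmx_gmxx_rxx_frm_chk_t chk;
+ *
+ *     chk.u64 = 0;
+ *     chk.s.fcserr = 1;  // flag FCS/CRC errors
+ *     chk.s.jabber = 1;  // flag frames longer than sys_length
+ *     chk.s.maxerr = 1;  // flag frames longer than max_length
+ *     chk.s.minerr = 1;  // flag frames shorter than min_length
+ *     cvmx_write_csr(CVMX_GMXX_RXX_FRM_CHK(index, interface), chk.u64);
+ * }
+ * @endverbatim
+ */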
+
+
+/**
+ * cvmx_gmx#_rx#_frm_ctl
+ *
+ * GMX_RX_FRM_CTL = Frame Control
+ *
+ *
+ * Notes:
+ * * PRE_CHK
+ * When set, the RX state expects a typical frame consisting of
+ * INTER_FRAME=>PREAMBLE(x7)=>SFD(x1)=>DAT. The state machine watches for
+ * this exact sequence in order to recognize a valid frame and push frame
+ * data into the Octane. There must be exactly 7 PREAMBLE cycles followed by
+ * the single SFD cycle for the frame to be accepted.
+ *
+ * When a problem does occur within the PREAMBLE sequence, the frame is
+ * marked as bad and not sent into the core. The GMX_RX_INT_REG[PCTERR]
+ * interrupt is also raised.
+ *
+ * * PRE_STRP
+ * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
+ * core as part of the packet.
+ *
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ *
+ * * CTL_BCK/CTL_DRP
+ * These bits control how the HW handles incoming PAUSE packets. Here are
+ * the most common modes of operation:
+ * CTL_BCK=1,CTL_DRP=1 - HW does it all
+ * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
+ * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
+ *
+ * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in half-duplex mode.
+ * Since PAUSE packets only apply to full-duplex operation, any PAUSE packet
+ * would constitute an exception which should be handled by the processing
+ * cores. PAUSE packets should not be forwarded.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets
+ In spi4 mode, all ports use prt0 for checking. */
+ uint64_t pre_align : 1; /**< When set, the PREAMBLE parser aligns to the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE and
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data
+ (PASS3 Only) */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts
+ (PASS2 only) */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0-7 cycles of PREAMBLE followed by SFD (pass 1.0)
+ 0-254 cycles of PREAMBLE followed by SFD (else) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0 - 7 cycles of PREAMBLE followed by SFD (pass1.0)
+ 0 - 254 cycles of PREAMBLE followed by SFD (else) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW, so when PRE_ALIGN is set, PRE_FREE
+ and PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
+ 0-254 cycles of PREAMBLE followed by SFD */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII at 10/100Mbs only) */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features.
+ (SGMII/1000Base-X only) */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly.
+ When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
+ must be zero. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
+} cvmx_gmxx_rxx_frm_ctl_t;
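+
+
+/*
+ * A minimal usage sketch for the PRE_ALIGN constraint described in the
+ * CN50XX field notes above: when PRE_ALIGN is set, PRE_FREE and PRE_STRP
+ * must also be set, and PRE_CHK must be set to enable any PREAMBLE
+ * feature. The accessors cvmx_read_csr()/cvmx_write_csr() and the
+ * CVMX_GMXX_RXX_FRM_CTL(port, interface) address macro are assumed from
+ * the other cvmx headers; they are not defined in this file.
+ *
+ *   int port = 0, interface = 0;  // example values
+ *   cvmx_gmxx_rxx_frm_ctl_t frm_ctl;
+ *   frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
+ *   frm_ctl.cn50xx.pre_chk = 1;   // enables all PREAMBLE features
+ *   frm_ctl.cn50xx.pre_align = 1; // align on SFD; PREAMBLE may be consumed
+ *   frm_ctl.cn50xx.pre_free = 1;  // required when PRE_ALIGN is set
+ *   frm_ctl.cn50xx.pre_strp = 1;  // required when PRE_ALIGN is set
+ *   cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
+ */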
+
+
+/**
+ * cvmx_gmx#_rx#_frm_max
+ *
+ * GMX_RX_FRM_MAX = Frame Max length
+ *
+ *
+ * Notes:
+ * In spi4 mode, all spi4 ports use prt0 for checking.
+ *
+ * When changing the LEN field, be sure that LEN does not exceed
+ * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
+ * are within the maximum length parameter to be rejected because they exceed
+ * the GMX_RX_JABBER[CNT] limit.
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_max_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Max-sized frame check
+ Failing packets set the MAXERR interrupt and are
+ optionally sent with opcode==MAXERR
+ LEN <= GMX_RX_JABBER[CNT] */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_max_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
+} cvmx_gmxx_rxx_frm_max_t;
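+
+
+/*
+ * A minimal usage sketch for the LEN <= GMX_RX_JABBER[CNT] constraint in
+ * the notes above: clamp the requested maximum frame length to the jabber
+ * count before programming it. The cvmx_gmxx_rxx_jabber_t type (with its
+ * CNT field) and the CVMX_GMXX_RXX_JABBER()/CVMX_GMXX_RXX_FRM_MAX()
+ * address macros are assumed from the other cvmx headers.
+ *
+ *   int port = 0, interface = 0;  // example values
+ *   uint16_t len = 1536;          // desired max frame size (example)
+ *   cvmx_gmxx_rxx_jabber_t jabber;
+ *   cvmx_gmxx_rxx_frm_max_t frm_max;
+ *   jabber.u64 = cvmx_read_csr(CVMX_GMXX_RXX_JABBER(port, interface));
+ *   frm_max.u64 = 0;
+ *   frm_max.s.len = (len <= jabber.s.cnt) ? len : jabber.s.cnt;
+ *   cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(port, interface), frm_max.u64);
+ */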
+
+
+/**
+ * cvmx_gmx#_rx#_frm_min
+ *
+ * GMX_RX_FRM_MIN = Frame Min length
+ *
+ *
+ * Notes:
+ * In spi4 mode, all spi4 ports use prt0 for checking.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_min_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t len : 16; /**< Byte count for Min-sized frame check
+ Failing packets set the MINERR interrupt and are
+ optionally sent with opcode==MINERR */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_min_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
+} cvmx_gmxx_rxx_frm_min_t;
+
+
+/**
+ * cvmx_gmx#_rx#_ifg
+ *
+ * GMX_RX_IFG = RX Min IFG
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< Min IFG between packets used to determine IFGERR
+ 1000Mbs, IFG==0.096us or 12 clks
+ 100Mbs, IFG==0.96us or 24 clks
+ 10Mbs, IFG==9.6us or 24 clks
+ In order to simplify the programming model,
+ IFG is doubled internally when
+ GMX_PRT_CFG[SPEED]==0. */
+#else
+ uint64_t ifg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_ifg_s cn30xx;
+ struct cvmx_gmxx_rxx_ifg_s cn31xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
+ struct cvmx_gmxx_rxx_ifg_s cn50xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn56xx;
+ struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn58xx;
+ struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
+} cvmx_gmxx_rxx_ifg_t;
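+
+
+/*
+ * A minimal usage sketch for the internal doubling described above: since
+ * IFG is doubled when GMX_PRT_CFG[SPEED]==0, programming IFG=12 gives the
+ * 12-clk minimum at 1000Mbs and the 24-clk minimum at 10/100Mbs, so one
+ * value covers all speeds. CVMX_GMXX_RXX_IFG() is assumed from the other
+ * cvmx headers.
+ *
+ *   int port = 0, interface = 0;  // example values
+ *   cvmx_gmxx_rxx_ifg_t ifg;
+ *   ifg.u64 = 0;
+ *   ifg.s.ifg = 12;  // doubled to 24 clks by HW when SPEED==0
+ *   cvmx_write_csr(CVMX_GMXX_RXX_IFG(port, interface), ifg.u64);
+ */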
+
+
+/**
+ * cvmx_gmx#_rx#_int_en
+ *
+ * GMX_RX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * In XAUI mode prt0 is used for checking.
+ *
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
+ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_en_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
+ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_en_cn56xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_27_63 : 37;
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t reserved_27_63 : 37;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
+} cvmx_gmxx_rxx_int_en_t;
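+
+
+/*
+ * A minimal usage sketch: enable only the frame-integrity interrupts and
+ * leave link-change and XAUI-specific bits masked. This uses the generic
+ * "s" view; on parts where a bit is reserved, use the chip-specific view
+ * instead. CVMX_GMXX_RXX_INT_EN() is assumed from the other cvmx headers.
+ *
+ *   int port = 0, interface = 0;  // example values
+ *   cvmx_gmxx_rxx_int_en_t en;
+ *   en.u64 = 0;
+ *   en.s.fcserr = 1; // FCS/CRC errors
+ *   en.s.jabber = 1; // frames longer than the jabber limit
+ *   en.s.maxerr = 1; // frames longer than GMX_RX_FRM_MAX
+ *   en.s.minerr = 1; // frames shorter than GMX_RX_FRM_MIN
+ *   cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(port, interface), en.u64);
+ */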
+
+
+/**
+ * cvmx_gmx#_rx#_int_reg
+ *
+ * GMX_RX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * (1) exceptions will only be raised to the control processor if the
+ * corresponding bit in the GMX_RX_INT_EN register is set.
+ *
+ * (2) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The GMX_RX_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (3) in half duplex operation, the expectation is that collisions will appear
+ * as either MINERR or CAREXT errors.
+ *
+ * (4) JABBER - An RX Jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMX will truncate the packet at the JABBER count.
+ * Failure to do so could lead to system instability.
+ *
+ * (5) NIBERR - This error is illegal at 1000Mbs speeds
+ * (GMX_RX_PRT_CFG[SPEED]==0) and will never assert.
+ *
+ * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
+ * GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
+ * > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
+ *
+ * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < GMX_RX_FRM_MIN.
+ *
+ * (8) ALNERR - Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the slottime
+ * (GMX_PRT_CFG[SLOTTIME]) is not satisfied. In 10/100 mode,
+ * this will result in a frame < SLOTTIME. In 1000 mode, it
+ * could result either in frame < SLOTTIME or a carrier extend
+ * error within the SLOTTIME. These conditions are visible as...
+ *
+ * . transfer ended before slottime - COLDET
+ * . carrier extend error - CAREXT
+ *
+ * (A) LENERR - Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ * and 1500 bytes. For untagged frames, the length must match
+ * exactly. For tagged frames, the length or length+4 must match.
+ *
+ * (B) PCTERR - checks that the frame transitions from PREAMBLE=>SFD=>DATA.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR - Not to be included in the HRM
+ *
+ * OVRERR is an architectural assertion check internal to GMX to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ *
+ * GMX has an internal arbiter which selects which of 4 ports to
+ * buffer in the main RX FIFO. If we normally buffer 8 bytes,
+ * then each port will typically push a tick every 8 cycles - if
+ * the packet interface is going as fast as possible. If there
+ * are four ports, they collectively push every two cycles. The
+ * assumption is that the inbound module can always consume one
+ * tick before another is produced; if that doesn't happen,
+ * OVRERR will assert.
+ *
+ * (D) In XAUI mode prt0 is used for interrupt logging.
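+ *
+ * (E) A minimal service sketch for this register, assuming the usual
+ * write-1-to-clear behavior of cvmx interrupt registers and the
+ * CVMX_GMXX_RXX_INT_REG() address macro (both are assumptions and
+ * are not stated in this file):
+ *
+ * cvmx_gmxx_rxx_int_reg_t isr;
+ * isr.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(port, interface));
+ * cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(port, interface), isr.u64);
+ * if (isr.s.jabber)
+ * ; // oversized frame was truncated at the jabber count (note 4)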
+ */
+typedef union
+{
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
+ Set when either a CRC8 error is detected or
+ a Control Character is found in the message
+ bytes after the K.SOM
+ NOTE: HG2CC has higher priority than HG2FLD,
+ i.e. a HiGig2 message that results in HG2CC
+ getting set will never set HG2FLD. */
+ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
+ 1) MSG_TYPE field not 6'b00_0000
+ i.e. it is not a FLOW CONTROL message, which
+ is the only defined type for HiGig2
+ 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
+ which is the only defined type for HiGig2
+ 3) FC_OBJECT field is neither 4'b0000 for
+ Physical Link nor 4'b0010 for Logical Link.
+ Those are the only two defined types in HiGig2 */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< RGMII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t reserved_6_6 : 1;
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< RGMII carrier extend error */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t reserved_6_6 : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_29_63 : 35;
+ uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
+ Set when either a CRC8 error is detected or
+ a Control Character is found in the message
+ bytes after the K.SOM
+ NOTE: HG2CC has higher priority than HG2FLD,
+ i.e. a HiGig2 message that results in HG2CC
+ getting set will never set HG2FLD. */
+ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
+ 1) MSG_TYPE field not 6'b00_0000
+ i.e. it is not a FLOW CONTROL message, which
+ is the only defined type for HiGig2
+ 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
+ which is the only defined type for HiGig2
+ 3) FC_OBJECT field is neither 4'b0000 for
+ Physical Link nor 4'b0010 for Logical Link.
+ Those are the only two defined types in HiGig2 */
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t loc_fault : 1;
+ uint64_t rem_fault : 1;
+ uint64_t bad_seq : 1;
+ uint64_t bad_term : 1;
+ uint64_t unsop : 1;
+ uint64_t uneop : 1;
+ uint64_t undat : 1;
+ uint64_t hg2fld : 1;
+ uint64_t hg2cc : 1;
+ uint64_t reserved_29_63 : 35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_27_63 : 37;
+ uint64_t undat : 1; /**< Unexpected Data
+ (XAUI Mode only) */
+ uint64_t uneop : 1; /**< Unexpected EOP
+ (XAUI Mode only) */
+ uint64_t unsop : 1; /**< Unexpected SOP
+ (XAUI Mode only) */
+ uint64_t bad_term : 1; /**< Frame is terminated by control character other
+ than /T/. The error propagation control
+ character /E/ will be included as part of the
+ frame and does not cause a frame termination.
+ (XAUI Mode only) */
+ uint64_t bad_seq : 1; /**< Reserved Sequence Detected
+ (XAUI Mode only) */
+ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected
+ (XAUI Mode only) */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure
+ (SGMII/1000Base-X only) */
+ uint64_t coldet : 1; /**< Collision Detection
+ (SGMII/1000Base-X half-duplex only) */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime
+ (SGMII/1000Base-X only) */
+ uint64_t rsverr : 1; /**< Reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol
+ In XAUI mode, the column of data that was bad
+ will be logged in GMX_RX_XAUI_BAD_COL */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Data reception error */
+ uint64_t reserved_5_6 : 2;
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t reserved_2_2 : 1;
+ uint64_t carext : 1; /**< Carrier extend error
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t carext : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr