aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJuli Mallett <jmallett@FreeBSD.org>2010-11-28 06:20:41 +0000
committerJuli Mallett <jmallett@FreeBSD.org>2010-11-28 06:20:41 +0000
commit7a1e830cbcd5d33aa7fcdd8732e6ea26510508fd (patch)
treeb8a47c127e9100c1078d4e43f050dccc3b15f210
parent1c305b501145f696d3597fb9b5b2091caaa6f67c (diff)
downloadsrc-7a1e830cbcd5d33aa7fcdd8732e6ea26510508fd.tar.gz
src-7a1e830cbcd5d33aa7fcdd8732e6ea26510508fd.zip
Import Cavium Octeon SDK 2.0 Simple Executive from cnusers.org.vendor/octeon-sdk/2.0.0
Notes
Notes: svn path=/vendor-sys/octeon-sdk/dist/; revision=215976 svn path=/vendor-sys/octeon-sdk/2.0.0/; revision=215977; tag=vendor/octeon-sdk/2.0.0
-rw-r--r--README.txt43
-rw-r--r--cvmip.h66
-rw-r--r--cvmx-abi.h66
-rw-r--r--cvmx-access-native.h190
-rw-r--r--cvmx-access.h98
-rw-r--r--cvmx-address.h150
-rw-r--r--cvmx-agl-defs.h4615
-rw-r--r--cvmx-app-hotplug.c402
-rw-r--r--cvmx-app-hotplug.h103
-rw-r--r--cvmx-app-init-linux.c169
-rw-r--r--cvmx-app-init.c344
-rw-r--r--cvmx-app-init.h133
-rw-r--r--cvmx-asm.h281
-rw-r--r--cvmx-asx.h66
-rw-r--r--cvmx-asx0-defs.h147
-rw-r--r--cvmx-asxx-defs.h1382
-rw-r--r--cvmx-atomic.h280
-rw-r--r--cvmx-bootloader.h100
-rw-r--r--cvmx-bootmem.c801
-rw-r--r--cvmx-bootmem.h108
-rw-r--r--cvmx-ciu-defs.h5527
-rw-r--r--cvmx-ciu.h65
-rw-r--r--cvmx-clock.c137
-rw-r--r--cvmx-clock.h139
-rw-r--r--cvmx-cmd-queue.c109
-rw-r--r--cvmx-cmd-queue.h109
-rw-r--r--cvmx-cn3010-evb-hs5.c66
-rw-r--r--cvmx-cn3010-evb-hs5.h66
-rw-r--r--cvmx-compactflash.c116
-rw-r--r--cvmx-compactflash.h64
-rw-r--r--cvmx-core.c105
-rw-r--r--cvmx-core.h90
-rw-r--r--cvmx-coremask.c66
-rw-r--r--cvmx-coremask.h74
-rw-r--r--cvmx-crypto.c78
-rw-r--r--cvmx-crypto.h70
-rw-r--r--cvmx-csr-addresses.h15490
-rw-r--r--cvmx-csr-db-support.c152
-rw-r--r--cvmx-csr-db.c66093
-rw-r--r--cvmx-csr-db.h89
-rw-r--r--cvmx-csr-enums.h76
-rw-r--r--cvmx-csr-typedefs.h74070
-rw-r--r--cvmx-csr.h73
-rw-r--r--cvmx-cvmmem.h73
-rw-r--r--cvmx-dbg-defs.h156
-rw-r--r--cvmx-debug-handler.S271
-rw-r--r--cvmx-debug-remote.c95
-rw-r--r--cvmx-debug-uart.c239
-rw-r--r--cvmx-debug.c1436
-rw-r--r--cvmx-debug.h457
-rw-r--r--cvmx-dfa-defs.h4982
-rw-r--r--cvmx-dfa.c68
-rw-r--r--cvmx-dfa.h70
-rw-r--r--cvmx-dfm-defs.h3224
-rw-r--r--cvmx-dma-engine.c149
-rw-r--r--cvmx-dma-engine.h72
-rw-r--r--cvmx-dpi-defs.h1305
-rw-r--r--cvmx-ebt3000.c66
-rw-r--r--cvmx-ebt3000.h66
-rw-r--r--cvmx-error-custom.c624
-rw-r--r--cvmx-error-custom.h91
-rw-r--r--cvmx-error-init-cn30xx.c3502
-rw-r--r--cvmx-error-init-cn31xx.c3833
-rw-r--r--cvmx-error-init-cn38xx.c4864
-rw-r--r--cvmx-error-init-cn38xxp2.c4421
-rw-r--r--cvmx-error-init-cn50xx.c3604
-rw-r--r--cvmx-error-init-cn52xx.c6679
-rw-r--r--cvmx-error-init-cn52xxp1.c6578
-rw-r--r--cvmx-error-init-cn56xx.c7625
-rw-r--r--cvmx-error-init-cn56xxp1.c7176
-rw-r--r--cvmx-error-init-cn58xx.c4937
-rw-r--r--cvmx-error-init-cn58xxp1.c4920
-rw-r--r--cvmx-error-init-cn63xx.c7183
-rw-r--r--cvmx-error-init-cn63xxp1.c6743
-rw-r--r--cvmx-error.c642
-rw-r--r--cvmx-error.h318
-rw-r--r--cvmx-fau.h108
-rw-r--r--cvmx-flash.c70
-rw-r--r--cvmx-flash.h74
-rw-r--r--cvmx-fpa-defs.h1423
-rw-r--r--cvmx-fpa.c177
-rw-r--r--cvmx-fpa.h155
-rw-r--r--cvmx-gmx.h68
-rw-r--r--cvmx-gmxx-defs.h8057
-rw-r--r--cvmx-gpio-defs.h542
-rw-r--r--cvmx-gpio.h68
-rw-r--r--cvmx-helper-board.c254
-rw-r--r--cvmx-helper-board.h96
-rw-r--r--cvmx-helper-check-defines.h73
-rw-r--r--cvmx-helper-errata.c111
-rw-r--r--cvmx-helper-errata.h80
-rw-r--r--cvmx-helper-fpa.c70
-rw-r--r--cvmx-helper-fpa.h68
-rw-r--r--cvmx-helper-jtag.c220
-rw-r--r--cvmx-helper-jtag.h106
-rw-r--r--cvmx-helper-loop.c80
-rw-r--r--cvmx-helper-loop.h66
-rw-r--r--cvmx-helper-npi.c81
-rw-r--r--cvmx-helper-npi.h66
-rw-r--r--cvmx-helper-rgmii.c108
-rw-r--r--cvmx-helper-rgmii.h66
-rw-r--r--cvmx-helper-sgmii.c117
-rw-r--r--cvmx-helper-sgmii.h66
-rw-r--r--cvmx-helper-spi.c90
-rw-r--r--cvmx-helper-spi.h66
-rw-r--r--cvmx-helper-srio.c317
-rw-r--r--cvmx-helper-srio.h107
-rw-r--r--cvmx-helper-util.c227
-rw-r--r--cvmx-helper-util.h120
-rw-r--r--cvmx-helper-xaui.c116
-rw-r--r--cvmx-helper-xaui.h66
-rw-r--r--cvmx-helper.c719
-rw-r--r--cvmx-helper.h94
-rw-r--r--cvmx-higig.h189
-rw-r--r--cvmx-interrupt-decodes.c3584
-rw-r--r--cvmx-interrupt-handler.S63
-rw-r--r--cvmx-interrupt-rsl.c762
-rw-r--r--cvmx-interrupt.c373
-rw-r--r--cvmx-interrupt.h130
-rw-r--r--cvmx-iob-defs.h1307
-rw-r--r--cvmx-iob.h66
-rw-r--r--cvmx-ipd-defs.h2458
-rw-r--r--cvmx-ipd.h139
-rw-r--r--cvmx-ixf18201.c362
-rw-r--r--cvmx-ixf18201.h112
-rw-r--r--cvmx-key-defs.h254
-rw-r--r--cvmx-key.h68
-rw-r--r--cvmx-l2c-defs.h5889
-rw-r--r--cvmx-l2c.c1179
-rw-r--r--cvmx-l2c.h304
-rw-r--r--cvmx-l2d-defs.h1172
-rw-r--r--cvmx-l2t-defs.h656
-rw-r--r--cvmx-led-defs.h656
-rw-r--r--cvmx-llm.c74
-rw-r--r--cvmx-llm.h90
-rw-r--r--cvmx-lmc.h66
-rw-r--r--cvmx-lmcx-defs.h7061
-rw-r--r--cvmx-log-arc.S56
-rw-r--r--cvmx-log.c79
-rw-r--r--cvmx-log.h157
-rw-r--r--cvmx-malloc.h78
-rw-r--r--cvmx-malloc/README-malloc12
-rw-r--r--cvmx-malloc/arena.c293
-rw-r--r--cvmx-malloc/malloc.c4106
-rw-r--r--cvmx-malloc/malloc.h213
-rw-r--r--cvmx-malloc/thread-m.h73
-rw-r--r--cvmx-mdio.h190
-rw-r--r--cvmx-mgmt-port.c470
-rw-r--r--cvmx-mgmt-port.h128
-rw-r--r--cvmx-mio-defs.h6586
-rw-r--r--cvmx-mio.h66
-rw-r--r--cvmx-mixx-defs.h1447
-rw-r--r--cvmx-mpi-defs.h299
-rw-r--r--cvmx-nand.c520
-rw-r--r--cvmx-nand.h112
-rw-r--r--cvmx-ndf-defs.h534
-rw-r--r--cvmx-npei-defs.h7676
-rw-r--r--cvmx-npi-defs.h4746
-rw-r--r--cvmx-npi.h66
-rw-r--r--cvmx-packet.h73
-rw-r--r--cvmx-pci-defs.h4714
-rw-r--r--cvmx-pci.h66
-rw-r--r--cvmx-pcie.c774
-rw-r--r--cvmx-pcie.h83
-rw-r--r--cvmx-pcieepx-defs.h4421
-rw-r--r--cvmx-pciercx-defs.h4432
-rw-r--r--cvmx-pcm-defs.h230
-rw-r--r--cvmx-pcmx-defs.h1082
-rw-r--r--cvmx-pcsx-defs.h1180
-rw-r--r--cvmx-pcsxx-defs.h926
-rw-r--r--cvmx-pemx-defs.h1192
-rw-r--r--cvmx-pescx-defs.h1092
-rw-r--r--cvmx-pexp-defs.h2065
-rw-r--r--cvmx-pip-defs.h3926
-rw-r--r--cvmx-pip.h245
-rw-r--r--cvmx-pko-defs.h2652
-rw-r--r--cvmx-pko.c110
-rw-r--r--cvmx-pko.h127
-rw-r--r--cvmx-platform.h100
-rw-r--r--cvmx-pow-defs.h1827
-rw-r--r--cvmx-pow.c80
-rw-r--r--cvmx-pow.h80
-rw-r--r--cvmx-power-throttle.c152
-rw-r--r--cvmx-power-throttle.h137
-rw-r--r--cvmx-rad-defs.h1006
-rw-r--r--cvmx-raid.c74
-rw-r--r--cvmx-raid.h66
-rw-r--r--cvmx-resources.config172
-rw-r--r--cvmx-rng.h76
-rw-r--r--cvmx-rnm-defs.h290
-rw-r--r--cvmx-rtc.h74
-rw-r--r--cvmx-rwlock.h74
-rw-r--r--cvmx-scratch.h74
-rw-r--r--cvmx-shared-linux-n32.ld279
-rw-r--r--cvmx-shared-linux-o32.ld277
-rw-r--r--cvmx-shared-linux.ld278
-rw-r--r--cvmx-shmem.c748
-rw-r--r--cvmx-shmem.h139
-rw-r--r--cvmx-sim-magic.h198
-rw-r--r--cvmx-sli-defs.h4229
-rw-r--r--cvmx-smi-defs.h101
-rw-r--r--cvmx-smix-defs.h450
-rw-r--r--cvmx-spi.c102
-rw-r--r--cvmx-spi.h72
-rw-r--r--cvmx-spi4000.c79
-rw-r--r--cvmx-spinlock.h66
-rw-r--r--cvmx-spx0-defs.h120
-rw-r--r--cvmx-spxx-defs.h1434
-rw-r--r--cvmx-srio.c1239
-rw-r--r--cvmx-srio.h525
-rw-r--r--cvmx-sriomaintx-defs.h4392
-rw-r--r--cvmx-sriox-defs.h3703
-rw-r--r--cvmx-srxx-defs.h375
-rw-r--r--cvmx-stxx-defs.h896
-rw-r--r--cvmx-swap.h64
-rw-r--r--cvmx-sysinfo.c102
-rw-r--r--cvmx-sysinfo.h191
-rw-r--r--cvmx-thunder.c66
-rw-r--r--cvmx-thunder.h74
-rw-r--r--cvmx-tim-defs.h510
-rw-r--r--cvmx-tim.c107
-rw-r--r--cvmx-tim.h71
-rw-r--r--cvmx-tlb.c470
-rw-r--r--cvmx-tlb.h270
-rw-r--r--cvmx-tra-defs.h3176
-rw-r--r--cvmx-tra.c497
-rw-r--r--cvmx-tra.h435
-rw-r--r--cvmx-twsi-raw.c464
-rw-r--r--cvmx-twsi-raw.h331
-rw-r--r--cvmx-twsi.c605
-rw-r--r--cvmx-twsi.h103
-rw-r--r--cvmx-uahcx-defs.h2536
-rw-r--r--cvmx-uart.c171
-rw-r--r--cvmx-uart.h161
-rw-r--r--cvmx-uctlx-defs.h850
-rw-r--r--cvmx-usb.c1605
-rw-r--r--cvmx-usb.h116
-rw-r--r--cvmx-usbcx-defs.h4359
-rw-r--r--cvmx-usbd.c1041
-rw-r--r--cvmx-usbd.h300
-rw-r--r--cvmx-usbnx-defs.h2386
-rw-r--r--cvmx-utils.h93
-rw-r--r--cvmx-version.h40
-rw-r--r--cvmx-warn.c66
-rw-r--r--cvmx-warn.h66
-rw-r--r--cvmx-wqe.h83
-rw-r--r--cvmx-zip-defs.h434
-rw-r--r--cvmx-zip.c66
-rw-r--r--cvmx-zip.h71
-rw-r--r--cvmx-zone.c68
-rw-r--r--cvmx.h68
-rw-r--r--cvmx.mk144
-rw-r--r--executive-config.h.template180
-rw-r--r--octeon-boot-info.h152
-rw-r--r--octeon-feature.h154
-rw-r--r--octeon-model.c150
-rw-r--r--octeon-model.h106
-rw-r--r--octeon-pci-console.c77
-rw-r--r--octeon-pci-console.h66
259 files changed, 278169 insertions, 126600 deletions
diff --git a/README.txt b/README.txt
deleted file mode 100644
index 553c46df946a..000000000000
--- a/README.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Readme for the Octeon Executive Library
-
-
-The Octeon Executive Library provides runtime support and hardware
-abstraction for the Octeon processor. The executive is composed of the
-libcvmx.a library as well as header files that provide
-functionality with inline functions.
-
-
-Usage:
-
-The libcvmx.a library is built for every application as part of the
-application build. (Please refer to the 'related pages' section of the
-HTML documentation for more information on the build system.)
-Applications using the executive should include the header files from
-$OCTEON_ROOT/target/include and link against the library that is built in
-the local obj directory. Each file using the executive
-should include the following two header files in order:
-
-#include "cvmx-config.h"
-#include "cvmx.h"
-
-The cvmx-config.h file contains configuration information for the
-executive and is generated by the cvmx-config script from an
-'executive-config.h' file. A sample version of this file is provided
-in the executive directory as 'executive-config.h.template'.
-
-Copy this file to 'executive-config.h' into the 'config' subdirectory
-of the application directory and customize as required by the application.
-Applications that don't use any simple executive functionality can omit
-the cvmx-config.h header file. Please refer to the examples for a
-demonstration of where to put the executive-config.h file and for an
-example of generated cvmx-config.h.
-
-For file specific information please see the documentation within the
-source files or the HTML documentation provided in docs/html/index.html.
-The HTML documentation is automatically generated by Doxygen from the
-source files.
-
-
-
-==========================================================================
-Please see the release notes for version specific information.
diff --git a/cvmip.h b/cvmip.h
index a8ac16d5bdaa..b3aa6b0ef97a 100644
--- a/cvmip.h
+++ b/cvmip.h
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,6 +42,7 @@
+
/**
* @file
*
@@ -48,7 +50,7 @@
*
* Definitions for the Internet Protocol (IP) support.
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*
*/
diff --git a/cvmx-abi.h b/cvmx-abi.h
index 0711558d768f..93d71b3b6303 100644
--- a/cvmx-abi.h
+++ b/cvmx-abi.h
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,12 +42,13 @@
+
/**
* @file
*
* This file defines macros for use in determining the current calling ABI.
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*/
#ifndef __CVMX_ABI_H__
diff --git a/cvmx-access-native.h b/cvmx-access-native.h
index c16ca9e96a2e..212b7c7af441 100644
--- a/cvmx-access-native.h
+++ b/cvmx-access-native.h
@@ -1,41 +1,43 @@
/***********************license start***************
- * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Functions for accessing memory and CSRs on Octeon when we are compiling
@@ -81,9 +83,24 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");
#ifdef CVMX_BUILD_FOR_UBOOT
- /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
- ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
- return(CAST64(ptr) & 0x7FFFFFFF);
+ uint64_t uboot_tlb_ptr_to_phys(void *ptr);
+
+ if (((uint32_t)ptr) < 0x80000000)
+ {
+ /* Handle useg (unmapped due to ERL) here*/
+ return(CAST64(ptr) & 0x7FFFFFFF);
+ }
+ else if (((uint32_t)ptr) < 0xC0000000)
+ {
+ /* Here we handle KSEG0/KSEG1 _pointers_. We know we are dealing
+ ** with 32 bit only values, so we treat them that way. Note that
+ ** a cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this case,
+ ** but the physical address of the KSEG0/KSEG1 address. */
+ return(CAST64(ptr) & 0x1FFFFFFF);
+ }
+ else
+ return(uboot_tlb_ptr_to_phys(ptr)); /* Should not get get here in !TLB case */
+
#endif
#ifdef __linux__
@@ -164,14 +181,49 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");
#ifdef CVMX_BUILD_FOR_UBOOT
- /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
- ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
+#if !CONFIG_OCTEON_UBOOT_TLB
if (physical_address >= 0x80000000)
return NULL;
else
return CASTPTR(void, (physical_address & 0x7FFFFFFF));
#endif
+ /* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
+ ** which can have a physical address above the 32 bit address space. 1-1 mappings are used
+ ** to allow the low 2 GBytes to be accessed as in error level.
+ **
+ ** NOTE: This conversion can cause problems in u-boot, as users may want to enter addresses
+ ** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
+ ** but is likely intended to be a boot bus address. */
+
+ if (physical_address < 0x80000000)
+ {
+ /* Handle useg here. ERL is set, so useg is unmapped. This is the only physical
+ ** address range that is directly addressable by u-boot. */
+ return CASTPTR(void, physical_address);
+ }
+ else
+ {
+ DECLARE_GLOBAL_DATA_PTR;
+ extern char uboot_start;
+ /* Above 0x80000000 we can only support one case - a physical address
+ ** that is mapped for u-boot code/data. We check against the u-boot mem range,
+ ** and return NULL if it is out of this range.
+ */
+ if (physical_address >= gd->bd->bi_uboot_ram_addr
+ && physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
+ {
+ return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
+ }
+ else
+ return(NULL);
+ }
+
+ if (physical_address >= 0x80000000)
+ return NULL;
+ else
+#endif
+
#ifdef __linux__
if (sizeof(void*) == 8)
{
@@ -197,7 +249,8 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
- else if ((physical_address >= 0x410000000ull) && (physical_address < 0x420000000ull))
+ else if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && (physical_address >= 0x410000000ull) &&
+ (physical_address < 0x420000000ull))
return CASTPTR(void, physical_address - 0x400000000ull);
else
return CASTPTR(void, physical_address);
@@ -453,7 +506,7 @@ static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
/* Perform an immediate read after every write to an RSL register to force
the write to complete. It doesn't matter what RSL read we do, so we
choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
- if ((csr_addr >> 40) == (0x800118))
+ if (((csr_addr >> 40) & 0x7ffff) == (0x118))
cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}
@@ -492,7 +545,7 @@ static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
/**
- * Number of the Core on which the program is currently running.
+ * Number of the Core on which the program is currently running.
*
* @return Number of cores
*/
@@ -537,53 +590,36 @@ static inline int cvmx_dpop(uint64_t val)
/**
- * Provide current cycle counter as a return value
+ * @deprecated
+ * Provide current cycle counter as a return value. Deprecated, use
+ * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
*
* @return current cycle counter
*/
static inline uint64_t cvmx_get_cycle(void)
{
-#if defined(CVMX_ABI_O32)
- uint32_t tmp_low, tmp_hi;
-
- asm volatile (
- " .set push \n"
- " .set mips64r2 \n"
- " .set noreorder \n"
- " rdhwr %[tmpl], $31 \n"
- " dsrl %[tmph], %[tmpl], 32 \n"
- " sll %[tmpl], 0 \n"
- " sll %[tmph], 0 \n"
- " .set pop \n"
- : [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );
-
- return(((uint64_t)tmp_hi << 32) + tmp_low);
-#else
- uint64_t cycle;
- CVMX_RDHWR(cycle, 31);
- return(cycle);
-#endif
+ return cvmx_clock_get_count(CVMX_CLOCK_CORE);
}
/**
- * Reads a chip global cycle counter. This counts CPU cycles since
- * chip reset. The counter is 64 bit.
- * This register does not exist on CN38XX pass 1 silicion
+ * @deprecated
+ * Reads a chip global cycle counter. This counts SCLK cycles since
+ * chip reset. The counter is 64 bit. This function is deprecated as the rate
+ * of the global cycle counter is different between Octeon+ and Octeon2, use
+ * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
+ * of SCLK may be differnet than the core clock.
*
* @return Global chip cycle count since chip reset.
*/
static inline uint64_t cvmx_get_cycle_global(void)
{
- if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
- return 0;
- else
- return cvmx_read64_uint64(CVMX_IPD_CLK_COUNT);
+ return cvmx_clock_get_count(CVMX_CLOCK_IPD);
}
/**
- * Wait for the specified number of cycle
+ * Wait for the specified number of core clock cycles
*
* @param cycles
*/
@@ -605,7 +641,7 @@ static inline void cvmx_wait(uint64_t cycles)
*/
static inline void cvmx_wait_usec(uint64_t usec)
{
- uint64_t done = cvmx_get_cycle() + usec * cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
+ uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
while (cvmx_get_cycle() < done)
{
/* Spin */
@@ -614,6 +650,22 @@ static inline void cvmx_wait_usec(uint64_t usec)
/**
+ * Wait for the specified number of io clock cycles
+ *
+ * @param cycles
+ */
+static inline void cvmx_wait_io(uint64_t cycles)
+{
+ uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;
+
+ while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
* Perform a soft reset of Octeon
*
* @return
diff --git a/cvmx-access.h b/cvmx-access.h
index d0da7caea22a..c1206ddd39c1 100644
--- a/cvmx-access.h
+++ b/cvmx-access.h
@@ -1,41 +1,43 @@
/***********************license start***************
- * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
- *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Function prototypes for accessing memory and CSRs on Octeon.
@@ -138,7 +140,7 @@ CVMX_FUNCTION void cvmx_send_single(uint64_t data);
CVMX_FUNCTION void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr);
/**
- * Number of the Core on which the program is currently running.
+ * Number of the Core on which the program is currently running.
*
* @return Number of cores
*/
@@ -165,23 +167,28 @@ CVMX_FUNCTION uint32_t cvmx_pop(uint32_t val);
CVMX_FUNCTION int cvmx_dpop(uint64_t val);
/**
- * Provide current cycle counter as a return value
+ * @deprecated
+ * Provide current cycle counter as a return value. Deprecated, use
+ * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
*
* @return current cycle counter
*/
CVMX_FUNCTION uint64_t cvmx_get_cycle(void);
/**
- * Reads a chip global cycle counter. This counts CPU cycles since
- * chip reset. The counter is 64 bit.
- * This register does not exist on CN38XX pass 1 silicion
+ * @deprecated
+ * Reads a chip global cycle counter. This counts SCLK cycles since
+ * chip reset. The counter is 64 bit. This function is deprecated as the rate
+ * of the global cycle counter is different between Octeon+ and Octeon2, use
+ * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
+ * of SCLK may be different than the core clock.
*
* @return Global chip cycle count since chip reset.
*/
-CVMX_FUNCTION uint64_t cvmx_get_cycle_global(void);
+CVMX_FUNCTION uint64_t cvmx_get_cycle_global(void) __attribute__((deprecated));
/**
- * Wait for the specified number of cycle
+ * Wait for the specified number of core clock cycles
*
* @param cycles
*/
@@ -195,6 +202,13 @@ CVMX_FUNCTION void cvmx_wait(uint64_t cycles);
CVMX_FUNCTION void cvmx_wait_usec(uint64_t usec);
/**
+ * Wait for the specified number of io clock cycles
+ *
+ * @param cycles
+ */
+CVMX_FUNCTION void cvmx_wait_io(uint64_t cycles);
+
+/**
* Perform a soft reset of Octeon
*
* @return
diff --git a/cvmx-address.h b/cvmx-address.h
index 096a68ab958f..daaf6a4cc5c0 100644
--- a/cvmx-address.h
+++ b/cvmx-address.h
@@ -1,41 +1,43 @@
/***********************license start***************
- * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
- *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Typedefs and defines for working with Octeon physical addresses.
@@ -63,29 +65,27 @@ typedef enum {
CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
} cvmx_mips_xkseg_space_t;
-// decodes <14:13> of a kseg3 window address
+ /* decodes <14:13> of a kseg3 window address */
typedef enum {
CVMX_ADD_WIN_SCR = 0L,
- CVMX_ADD_WIN_DMA = 1L, // see cvmx_add_win_dma_dec_t for further decode
+ CVMX_ADD_WIN_DMA = 1L, /* see cvmx_add_win_dma_dec_t for further decode */
CVMX_ADD_WIN_UNUSED = 2L,
CVMX_ADD_WIN_UNUSED2 = 3L
} cvmx_add_win_dec_t;
-// decode within DMA space
+ /* decode within DMA space */
typedef enum {
- CVMX_ADD_WIN_DMA_ADD = 0L, // add store data to the write buffer entry, allocating it if necessary
- CVMX_ADD_WIN_DMA_SENDMEM = 1L, // send out the write buffer entry to DRAM
- // store data must be normal DRAM memory space address in this case
- CVMX_ADD_WIN_DMA_SENDDMA = 2L, // send out the write buffer entry as an IOBDMA command
- // see CVMX_ADD_WIN_DMA_SEND_DEC for data contents
- CVMX_ADD_WIN_DMA_SENDIO = 3L, // send out the write buffer entry as an IO write
- // store data must be normal IO space address in this case
- CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, // send out a single-tick command on the NCB bus
- // no write buffer data needed/used
+ CVMX_ADD_WIN_DMA_ADD = 0L, /* add store data to the write buffer entry, allocating it if necessary */
+ CVMX_ADD_WIN_DMA_SENDMEM = 1L, /* send out the write buffer entry to DRAM */
+ /* store data must be normal DRAM memory space address in this case */
+ CVMX_ADD_WIN_DMA_SENDDMA = 2L, /* send out the write buffer entry as an IOBDMA command */
+ /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
+ CVMX_ADD_WIN_DMA_SENDIO = 3L, /* send out the write buffer entry as an IO write */
+ /* store data must be normal IO space address in this case */
+ CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, /* send out a single-tick command on the NCB bus */
+ /* no write buffer data needed/used */
} cvmx_add_win_dma_dec_t;
-
-
/**
* Physical Address Decode
*
@@ -116,63 +116,63 @@ typedef union {
struct {
cvmx_mips_space_t R : 2;
uint64_t offset :62;
- } sva; // mapped or unmapped virtual address
+ } sva; /* mapped or unmapped virtual address */
struct {
uint64_t zeroes :33;
uint64_t offset :31;
- } suseg; // mapped USEG virtual addresses (typically)
+ } suseg; /* mapped USEG virtual addresses (typically) */
struct {
uint64_t ones :33;
cvmx_mips_xkseg_space_t sp : 2;
uint64_t offset :29;
- } sxkseg; // mapped or unmapped virtual address
+ } sxkseg; /* mapped or unmapped virtual address */
struct {
- cvmx_mips_space_t R : 2; // CVMX_MIPS_SPACE_XKPHYS in this case
- uint64_t cca : 3; // ignored by octeon
+ cvmx_mips_space_t R : 2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
+ uint64_t cca : 3; /* ignored by octeon */
uint64_t mbz :10;
- uint64_t pa :49; // physical address
- } sxkphys; // physical address accessed through xkphys unmapped virtual address
+ uint64_t pa :49; /* physical address */
+ } sxkphys; /* physical address accessed through xkphys unmapped virtual address */
struct {
uint64_t mbz :15;
- uint64_t is_io : 1; // if set, the address is uncached and resides on MCB bus
- uint64_t did : 8; // the hardware ignores this field when is_io==0, else device ID
- uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t is_io : 1; /* if set, the address is uncached and resides on MCB bus */
+ uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
uint64_t offset :36;
- } sphys; // physical address
+ } sphys; /* physical address */
struct {
- uint64_t zeroes :24; // techically, <47:40> are dont-cares
- uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t zeroes :24; /* technically, <47:40> are don't-cares */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
uint64_t offset :36;
- } smem; // physical mem address
+ } smem; /* physical mem address */
struct {
uint64_t mem_region :2;
uint64_t mbz :13;
- uint64_t is_io : 1; // 1 in this case
- uint64_t did : 8; // the hardware ignores this field when is_io==0, else device ID
- uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t is_io : 1; /* 1 in this case */
+ uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
+ uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
uint64_t offset :36;
- } sio; // physical IO address
+ } sio; /* physical IO address */
struct {
uint64_t ones : 49;
- cvmx_add_win_dec_t csrdec : 2; // CVMX_ADD_WIN_SCR (0) in this case
+ cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_SCR (0) in this case */
uint64_t addr : 13;
- } sscr; // scratchpad virtual address - accessed through a window at the end of kseg3
+ } sscr; /* scratchpad virtual address - accessed through a window at the end of kseg3 */
- // there should only be stores to IOBDMA space, no loads
+ /* there should only be stores to IOBDMA space, no loads */
struct {
uint64_t ones : 49;
- cvmx_add_win_dec_t csrdec : 2; // CVMX_ADD_WIN_DMA (1) in this case
+ cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_DMA (1) in this case */
uint64_t unused2: 3;
cvmx_add_win_dma_dec_t type : 3;
uint64_t addr : 7;
- } sdma; // IOBDMA virtual address - accessed through a window at the end of kseg3
+ } sdma; /* IOBDMA virtual address - accessed through a window at the end of kseg3 */
struct {
uint64_t didspace : 24;
@@ -203,8 +203,8 @@ typedef union {
#define CVMX_FULL_DID(did,subdid) (((did) << 3) | (subdid))
-// from include/ncb_rsl_id.v
-#define CVMX_OCT_DID_MIS 0ULL // misc stuff
+ /* from include/ncb_rsl_id.v */
+#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
#define CVMX_OCT_DID_GMX0 1ULL
#define CVMX_OCT_DID_GMX1 2ULL
#define CVMX_OCT_DID_PCI 3ULL
@@ -217,7 +217,7 @@ typedef union {
#define CVMX_OCT_DID_PKT 10ULL
#define CVMX_OCT_DID_TIM 11ULL
#define CVMX_OCT_DID_TAG 12ULL
-// the rest are not on the IO bus
+ /* the rest are not on the IO bus */
#define CVMX_OCT_DID_L2C 16ULL
#define CVMX_OCT_DID_LMC 17ULL
#define CVMX_OCT_DID_SPX0 18ULL
diff --git a/cvmx-agl-defs.h b/cvmx-agl-defs.h
new file mode 100644
index 000000000000..2138f90a19fd
--- /dev/null
+++ b/cvmx-agl-defs.h
@@ -0,0 +1,4615 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-agl-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon agl.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_AGL_TYPEDEFS_H__
+#define __CVMX_AGL_TYPEDEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
+}
+#else
+#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
+static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
+}
+#else
+#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
+static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
+}
+#else
+#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
+static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
+}
+#else
+#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
+}
+#else
+#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
+}
+#else
+#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
+}
+#else
+#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048;
+}
+#else
+#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
+}
+#else
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_AGL_PRTX_CTL(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8;
+}
+#else
+#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
+#endif
+
+/**
+ * cvmx_agl_gmx_bad_reg
+ *
+ * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
+ *
+ *
+ * Notes:
+ * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
+ * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
+ * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_bad_reg
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_bad_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_38_63 : 26;
+ uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
+ uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
+ uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
+ uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
+ uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_24_25 : 2;
+ uint64_t loststat : 2; /**< TX Statistics data was over-written
+ In MII/RGMII, one bit per port
+ TX Stats are corrupted */
+ uint64_t reserved_4_21 : 18;
+ uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 2;
+ uint64_t reserved_4_21 : 18;
+ uint64_t loststat : 2;
+ uint64_t reserved_24_25 : 2;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t ovrflw1 : 1;
+ uint64_t txpop1 : 1;
+ uint64_t txpsh1 : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bad_reg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_38_63 : 26;
+ uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
+ uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
+ uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
+ uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
+ uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_4_21 : 18;
+ uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 2;
+ uint64_t reserved_4_21 : 18;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t ovrflw1 : 1;
+ uint64_t txpop1 : 1;
+ uint64_t txpsh1 : 1;
+ uint64_t reserved_38_63 : 26;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bad_reg_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_35_63 : 29;
+ uint64_t txpsh : 1; /**< TX FIFO overflow */
+ uint64_t txpop : 1; /**< TX FIFO underflow */
+ uint64_t ovrflw : 1; /**< RX FIFO overflow */
+ uint64_t reserved_27_31 : 5;
+ uint64_t statovr : 1; /**< TX Statistics overflow */
+ uint64_t reserved_23_25 : 3;
+ uint64_t loststat : 1; /**< TX Statistics data was over-written
+ TX Stats are corrupted */
+ uint64_t reserved_3_21 : 19;
+ uint64_t out_ovr : 1; /**< Outbound data FIFO overflow */
+ uint64_t reserved_0_1 : 2;
+#else
+ uint64_t reserved_0_1 : 2;
+ uint64_t out_ovr : 1;
+ uint64_t reserved_3_21 : 19;
+ uint64_t loststat : 1;
+ uint64_t reserved_23_25 : 3;
+ uint64_t statovr : 1;
+ uint64_t reserved_27_31 : 5;
+ uint64_t ovrflw : 1;
+ uint64_t txpop : 1;
+ uint64_t txpsh : 1;
+ uint64_t reserved_35_63 : 29;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_bad_reg_s cn63xx;
+ struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_bad_reg cvmx_agl_gmx_bad_reg_t;
+
+/**
+ * cvmx_agl_gmx_bist
+ *
+ * AGL_GMX_BIST = GMX BIST Results
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_bist
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_25_63 : 39;
+ uint64_t status : 25; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.fif_bnk0
+ - 1: gmx#.inb.fif_bnk1
+ - 2: gmx#.inb.fif_bnk2
+ - 3: gmx#.inb.fif_bnk3
+ - 4: gmx#.inb.fif_bnk_ext0
+ - 5: gmx#.inb.fif_bnk_ext1
+ - 6: gmx#.inb.fif_bnk_ext2
+ - 7: gmx#.inb.fif_bnk_ext3
+ - 8: gmx#.outb.fif.fif_bnk0
+ - 9: gmx#.outb.fif.fif_bnk1
+ - 10: RAZ
+ - 11: RAZ
+ - 12: gmx#.outb.fif.fif_bnk_ext0
+ - 13: gmx#.outb.fif.fif_bnk_ext1
+ - 14: RAZ
+ - 15: RAZ
+ - 16: gmx#.csr.gmi0.srf8x64m1_bist
+ - 17: gmx#.csr.gmi1.srf8x64m1_bist
+ - 18: RAZ
+ - 19: RAZ
+ - 20: gmx#.csr.drf20x32m2_bist
+ - 21: gmx#.csr.drf20x48m2_bist
+ - 22: gmx#.outb.stat.drf16x27m1_bist
+ - 23: gmx#.outb.stat.drf40x64m1_bist
+ - 24: RAZ */
+#else
+ uint64_t status : 25;
+ uint64_t reserved_25_63 : 39;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bist_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t status : 10; /**< BIST Results.
+ HW sets a bit in BIST for each memory that fails
+ - 0: gmx#.inb.drf128x78m1_bist
+ - 1: gmx#.outb.fif.drf128x71m1_bist
+ - 2: gmx#.csr.gmi0.srf8x64m1_bist
+ - 3: gmx#.csr.gmi1.srf8x64m1_bist
+ - 4: 0
+ - 5: 0
+ - 6: gmx#.csr.drf20x80m1_bist
+ - 7: gmx#.outb.stat.drf16x27m1_bist
+ - 8: gmx#.outb.stat.drf40x64m1_bist
+ - 9: 0 */
+#else
+ uint64_t status : 10;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_bist_s cn63xx;
+ struct cvmx_agl_gmx_bist_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t;
+
+/**
+ * cvmx_agl_gmx_drv_ctl
+ *
+ * AGL_GMX_DRV_CTL = GMX Drive Control
+ *
+ *
+ * Notes:
+ * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
+ * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_drv_ctl
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_drv_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_49_63 : 15;
+ uint64_t byp_en1 : 1; /**< Compensation Controller Bypass Enable (MII1) */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pctl1 : 5; /**< AGL PCTL (MII1) */
+ uint64_t reserved_37_39 : 3;
+ uint64_t nctl1 : 5; /**< AGL NCTL (MII1) */
+ uint64_t reserved_17_31 : 15;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_31 : 15;
+ uint64_t nctl1 : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t pctl1 : 5;
+ uint64_t reserved_45_47 : 3;
+ uint64_t byp_en1 : 1;
+ uint64_t reserved_49_63 : 15;
+#endif
+ } s;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xx;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< AGL PCTL */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< AGL NCTL */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t byp_en : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
+};
+typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t;
+
+/**
+ * cvmx_agl_gmx_inf_mode
+ *
+ * AGL_GMX_INF_MODE = Interface Mode
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_inf_mode
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_inf_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1; /**< Interface Enable */
+ uint64_t reserved_0_0 : 1;
+#else
+ uint64_t reserved_0_0 : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_inf_mode_s cn52xx;
+ struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
+ struct cvmx_agl_gmx_inf_mode_s cn56xx;
+ struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
+};
+typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t;
+
+/**
+ * cvmx_agl_gmx_prt#_cfg
+ *
+ * AGL_GMX_PRT_CFG = Port description
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_prtx_cfg
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_prtx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_14_63 : 50; /**< Reserved */
+ uint64_t tx_idle : 1; /**< TX Machine is idle */
+ uint64_t rx_idle : 1; /**< RX Machine is idle */
+ uint64_t reserved_9_11 : 3; /**< Reserved */
+ uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved */
+ uint64_t reserved_7_7 : 1; /**< Reserved */
+ uint64_t burst : 1; /**< Half-Duplex Burst Enable
+ Only valid for 1000Mbs half-duplex operation
+ 0 = burst length of 0x2000 (halfdup / 1000Mbs)
+ 1 = burst length of 0x0 (all other modes) */
+ uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
+ RGMII traffic. When this bit is clear on a given
+ port, then all packet cycles will appear as
+ inter-frame cycles. */
+ uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
+ RGMII traffic. When this bit is clear on a given
+ port, then all packet cycles will appear as
+ inter-frame cycles. */
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bitimes (10/100Mbs operation)
+ 1 = 4096 bitimes (1000Mbs operation) */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
+ 10 = 10Mbs operation
+ 00 = 100Mbs operation
+ 01 = 1000Mbs operation
+ 11 = Reserved */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t burst : 1;
+ uint64_t reserved_7_7 : 1;
+ uint64_t speed_msb : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t rx_idle : 1;
+ uint64_t tx_idle : 1;
+ uint64_t reserved_14_63 : 50;
+#endif
+ } s;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58; /**< Reserved */
+ uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
+ MII traffic. When this bit is clear on a given
+ port, then all MII cycles will appear as
+ inter-frame cycles. */
+ uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
+ MII traffic. When this bit is clear on a given
+ port, then all MII cycles will appear as
+ inter-frame cycles. */
+ uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
+ 0 = 512 bitimes (10/100Mbs operation)
+ 1 = Reserved */
+ uint64_t duplex : 1; /**< Duplex
+ 0 = Half Duplex (collisions/extensions/bursts)
+ 1 = Full Duplex */
+ uint64_t speed : 1; /**< Link Speed
+ 0 = 10/100Mbs operation
+ 1 = Reserved */
+ uint64_t en : 1; /**< Link Enable
+ When EN is clear, packets will not be received
+ or transmitted (including PAUSE and JAM packets).
+ If EN is cleared while a packet is currently
+ being received or transmitted, the packet will
+ be allowed to complete before the bus is idled.
+ On the RX side, subsequent packets in a burst
+ will be ignored. */
+#else
+ uint64_t en : 1;
+ uint64_t speed : 1;
+ uint64_t duplex : 1;
+ uint64_t slottime : 1;
+ uint64_t rx_en : 1;
+ uint64_t tx_en : 1;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam0
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam0
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam1
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam1
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam2
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam2
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam3
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam3
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam4
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam4
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam5
+ *
+ * AGL_GMX_RX_ADR_CAM = Address Filtering Control
+ *
+ *
+ * Notes:
+ * Not reset when MIX*_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam5
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t adr : 64; /**< The DMAC address to match on
+ Each entry contributes 8 bits to one of 8 matchers
+ Write transactions to AGL_GMX_RX_ADR_CAM will not
+ change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
+ The CAM matches against unicast or multicast DMAC
+ addresses. */
+#else
+ uint64_t adr : 64;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_cam_en
+ *
+ * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_adr_cam_en
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56; /**< Reserved */
+ uint64_t en : 8; /**< CAM Entry Enables */
+#else
+ uint64_t en : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t;
+
+/**
+ * cvmx_agl_gmx_rx#_adr_ctl
+ *
+ * AGL_GMX_RX_ADR_CTL = Address Filtering Control
+ *
+ *
+ * Notes:
+ * * ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ *
+ * @verbatim
+ * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
+ * ASSERT(prt >= 0 && prt <= 3);
+ * if (is_bcst(dmac)) // broadcast accept
+ * return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
+ * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
+ * return REJECT;
+ * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
+ * return ACCEPT;
+ *
+ * cam_hit = 0;
+ *
+ * for (i=0; i<8; i++) [
+ * if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
+ * continue;
+ * uint48 unswizzled_mac_adr = 0x0;
+ * for (j=5; j>=0; j--) [
+ * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
+ * ]
+ * if (unswizzled_mac_adr == dmac) [
+ * cam_hit = 1;
+ * break;
+ * ]
+ * ]
+ *
+ * if (cam_hit)
+ * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
+ * else
+ * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
+ * ]
+ * @endverbatim
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_adr_ctl
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60; /**< Reserved */
+ uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
+ 0 = reject the packet on DMAC address match
+ 1 = accept the packet on DMAC address match */
+ uint64_t mcst : 2; /**< Multicast Mode
+ 0 = Use the Address Filter CAM
+ 1 = Force reject all multicast packets
+ 2 = Force accept all multicast packets
+ 3 = Reserved */
+ uint64_t bcst : 1; /**< Accept All Broadcast Packets */
+#else
+ uint64_t bcst : 1;
+ uint64_t mcst : 2;
+ uint64_t cam_mode : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_decision
+ *
+ * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
+ *
+ *
+ * Notes:
+ * As each byte in a packet is received by GMX, the L2 byte count is compared
+ * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
+ *
+ * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ * L2 Size
+ * Port Mode <=AGL_GMX_RX_DECISION bytes (default=24) >AGL_GMX_RX_DECISION bytes (default=24)
+ *
+ * MII/Full Duplex accept packet apply filters
+ * no filtering is applied accept packet based on DMAC and PAUSE packet filters
+ *
+ * MII/Half Duplex drop packet apply filters
+ * packet is unconditionally dropped accept packet based on DMAC
+ *
+ * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_decision
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_decision_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59; /**< Reserved */
+ uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
+ a packet. */
+#else
+ uint64_t cnt : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t;
+
+/**
+ * cvmx_agl_gmx_rx#_frm_chk
+ *
+ * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
+ *
+ *
+ * Notes:
+ * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_chk
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_chk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54; /**< Reserved */
+ uint64_t niberr : 1; /**< Nibble error */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with packet data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55; /**< Reserved */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1; /**< Reserved */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t;
+
+/**
+ * cvmx_agl_gmx_rx#_frm_ctl
+ *
+ * AGL_GMX_RX_FRM_CTL = Frame Control
+ *
+ *
+ * Notes:
+ * * PRE_STRP
+ * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
+ * core as part of the packet.
+ *
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ *
+ * * CTL_BCK/CTL_DRP
+ * These bits control how the HW handles incoming PAUSE packets. Here are
+ * the most common modes of operation:
+ * CTL_BCK=1,CTL_DRP=1 - HW does it all
+ * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
+ * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
+ *
+ * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
+ * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
+ * would constitute an exception which should be handled by the processing
+ * cores. PAUSE packets should not be forwarded.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_ctl
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_13_63 : 51; /**< Reserved */
+ uint64_t ptp_mode : 1; /**< Timestamp mode
+ When PTP_MODE is set, a 64-bit timestamp will be
+ prepended to every incoming packet. The timestamp
+ bytes are added to the packet in such a way as to
+ not modify the packet's receive byte count. This
+ implies that the AGL_GMX_RX_JABBER,
+ AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX,
+ AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the
+ AGL_GMX_RX_STATS_* do not require any adjustment
+ as they operate on the received packet size.
+ If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
+ uint64_t reserved_11_11 : 1; /**< Reserved */
+ uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
+ due to PARTIAL packets */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PRE_STRP should be set to
+ account for the variable nature of the PREAMBLE.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ AGL will begin the frame at the first SFD.
+ PRE_FREE must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_STRP must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
+ PREAMBLE to begin every frame. AGL checks that a
+ valid PREAMBLE is received (based on PRE_FREE).
+ When a problem does occur within the PREAMBLE
+ sequence, the frame is marked as bad and not sent
+ into the core. The AGL_GMX_RX_INT_REG[PCTERR]
+ interrupt is also raised. */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t null_dis : 1;
+ uint64_t reserved_11_11 : 1;
+ uint64_t ptp_mode : 1;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54; /**< Reserved */
+ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
+ regardless of the number of previous PREAMBLE
+ nibbles. In this mode, PREAMBLE can be consumed
+ by the HW so when PRE_ALIGN is set, PRE_FREE,
+ PRE_STRP must be set for correct operation.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pad_len : 1; /**< When set, disables the length check for non-min
+ sized pkts with padding in the client data */
+ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
+ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
+ 0 - 254 cycles of PREAMBLE followed by SFD
+ PRE_FREE must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
+ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
+ Multicast address */
+ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
+ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
+ uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
+ 0=PREAMBLE+SFD is sent to core as part of frame
+ 1=PREAMBLE+SFD is dropped
+ PRE_STRP must be set if PRE_ALIGN is set.
+ PRE_CHK must be set to enable this and all
+ PREAMBLE features. */
+ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
+ to begin every frame. GMX checks that the
+ PREAMBLE is sent correctly */
+#else
+ uint64_t pre_chk : 1;
+ uint64_t pre_strp : 1;
+ uint64_t ctl_drp : 1;
+ uint64_t ctl_bck : 1;
+ uint64_t ctl_mcst : 1;
+ uint64_t ctl_smac : 1;
+ uint64_t pre_free : 1;
+ uint64_t vlan_len : 1;
+ uint64_t pad_len : 1;
+ uint64_t pre_align : 1;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_frm_max
+ *
+ * AGL_GMX_RX_FRM_MAX = Frame Max length
+ *
+ *
+ * Notes:
+ * When changing the LEN field, be sure that LEN does not exceed
+ * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
+ * are within the maximum length parameter to be rejected because they exceed
+ * the AGL_GMX_RX_JABBER[CNT] limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_frm_max
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_max_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48; /**< Reserved */
+ uint64_t len : 16; /**< Byte count for Max-sized frame check
+ AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check
+ for port n.
+ If enabled, failing packets set the MAXERR
+ interrupt and the MIX opcode is set to OVER_FCS
+ (0x3, if packet has bad FCS) or OVER_ERR (0x4, if
+ packet has good FCS).
+ LEN <= AGL_GMX_RX_JABBER[CNT] */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t;
+
+/**
+ * cvmx_agl_gmx_rx#_frm_min
+ *
+ * AGL_GMX_RX_FRM_MIN = Frame Min length
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_frm_min
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_min_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48; /**< Reserved */
+ uint64_t len : 16; /**< Byte count for Min-sized frame check
+ AGL_GMX_RXn_FRM_CHK[MINERR] enables the check
+ for port n.
+ If enabled, failing packets set the MINERR
+ interrupt and the MIX opcode is set to UNDER_FCS
+ (0x6, if packet has bad FCS) or UNDER_ERR (0x8,
+ if packet has good FCS). */
+#else
+ uint64_t len : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t;
+
+/**
+ * cvmx_agl_gmx_rx#_ifg
+ *
+ * AGL_GMX_RX_IFG = RX Min IFG
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_ifg
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60; /**< Reserved */
+ uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
+ determine IFGERR. Normally IFG is 96 bits.
+ Note in some operating modes, IFG cycles can be
+ inserted or removed in order to achieve clock rate
+ adaptation. For these reasons, the default value
+ is slightly conservative and does not check up to
+ the full 96 bits of IFG. */
+#else
+ uint64_t ifg : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;
+
+/**
+ * cvmx_agl_gmx_rx#_int_en
+ *
+ * AGL_GMX_RX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_int_en
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44; /**< Reserved */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< Packet reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44; /**< Reserved */
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3; /**< Reserved */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
+ uint64_t reserved_9_9 : 1; /**< Reserved */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1; /**< Reserved */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;
+
+/**
+ * cvmx_agl_gmx_rx#_int_reg
+ *
+ * AGL_GMX_RX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * (1) exceptions will only be raised to the control processor if the
+ * corresponding bit in the AGL_GMX_RX_INT_EN register is set.
+ *
+ * (2) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (3) in half duplex operation, the expectation is that collisions will appear
+ * as MINERRs.
+ *
+ * (4) JABBER - An RX Jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMX will truncate the packet at the JABBER count.
+ * Failure to do so could lead to system instability.
+ *
+ * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
+ * AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
+ * > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
+ *
+ * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
+ *
+ * (8) ALNERR - Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the received
+ * frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
+ *
+ * (A) LENERR - Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ * and 1500 bytes. For untagged frames, the length must exact
+ * match. For tagged frames the length or length+4 must match.
+ *
+ * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR - Not to be included in the HRM
+ *
+ * OVRERR is an architectural assertion check internal to GMX to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ *
+ * GMX has an internal arbiter which selects which of 4 ports to
+ * buffer in the main RX FIFO. If we normally buffer 8 bytes,
+ * then each port will typically push a tick every 8 cycles - if
+ * the packet interface is going as fast as possible. If there
+ * are four ports, they push every two cycles. So that's the
+ * assumption. That the inbound module will always be able to
+ * consume the tick before another is produced. If that doesn't
+ * happen - that's when OVRERR will assert.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_int_reg
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_int_reg_s /* Common (full) layout: RGMII-capable parts */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
+ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
+ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< Packet reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with Packet Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t carext : 1; /**< Carrier extend error */
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t minerr : 1;
+ uint64_t carext : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t niberr : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t phy_link : 1;
+ uint64_t phy_spd : 1;
+ uint64_t phy_dupx : 1;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx /* cn52xx/cn56xx layout: the RGMII-specific bits of the common layout are reserved here */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
+ uint64_t reserved_16_18 : 3;
+ uint64_t ifgerr : 1; /**< Interframe Gap Violation
+ Does not necessarily indicate a failure */
+ uint64_t coldet : 1; /**< Collision Detection */
+ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
+ uint64_t rsverr : 1; /**< MII reserved opcodes */
+ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
+ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
+ This interrupt should never assert */
+ uint64_t reserved_9_9 : 1;
+ uint64_t skperr : 1; /**< Skipper error */
+ uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
+ uint64_t lenerr : 1; /**< Frame was received with length error */
+ uint64_t alnerr : 1; /**< Frame was received with an alignment error */
+ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< Frame was received with length > sys_length */
+ uint64_t maxerr : 1; /**< Frame was received with length > max_length */
+ uint64_t reserved_1_1 : 1;
+ uint64_t minerr : 1; /**< Frame was received with length < min_length */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t minerr : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t maxerr : 1;
+ uint64_t jabber : 1;
+ uint64_t fcserr : 1;
+ uint64_t alnerr : 1;
+ uint64_t lenerr : 1;
+ uint64_t rcverr : 1;
+ uint64_t skperr : 1;
+ uint64_t reserved_9_9 : 1;
+ uint64_t ovrerr : 1;
+ uint64_t pcterr : 1;
+ uint64_t rsverr : 1;
+ uint64_t falerr : 1;
+ uint64_t coldet : 1;
+ uint64_t ifgerr : 1;
+ uint64_t reserved_16_18 : 3;
+ uint64_t pause_drp : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1; /* same layout as cn52xx */
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xx; /* cn63xx uses the full common layout */
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;
+
+/**
+ * cvmx_agl_gmx_rx#_jabber
+ *
+ * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
+ *
+ *
+ * Notes:
+ * CNT must be 8-byte aligned such that CNT[2:0] == 0
+ *
+ * The packet that will be sent to the packet input logic will have an
+ * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
+ * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
+ * defined as...
+ *
+ * max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
+ *
+ * Be sure the CNT field value is at least as large as the
+ * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
+ * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
+ * because they exceed the CNT limit.
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_jabber
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_jabber_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< Byte count for jabber check
+ Failing packets set the JABBER interrupt and are
+ optionally sent with opcode==JABBER
+ GMX will truncate the packet to CNT bytes
+ CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;
+
+/**
+ * cvmx_agl_gmx_rx#_pause_drop_time
+ *
+ * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_pause_drop_time
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t status : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;
+
+/**
+ * cvmx_agl_gmx_rx#_rx_inbnd
+ *
+ * AGL_GMX_RX_INBND = RGMII InBand Link Status
+ *
+ *
+ * Notes:
+ * These fields are only valid if the attached PHY is operating in RGMII mode
+ * and supports the optional in-band status (see section 3.4.1 of the RGMII
+ * specification, version 1.3 for more information).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_rx_inbnd
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex | NS
+ 0=half-duplex
+ 1=full-duplex */
+ uint64_t speed : 2; /**< RGMII Inbound LinkSpeed | NS
+ 00=2.5MHz
+ 01=25MHz
+ 10=125MHz
+ 11=Reserved */
+ uint64_t status : 1; /**< RGMII Inbound LinkStatus | NS
+ 0=down
+ 1=up */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t status : 1;
+ uint64_t speed : 2;
+ uint64_t duplex : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx; /* Only defined for cn63xx parts */
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_ctl
+ *
+ * AGL_GMX_RX_STATS_CTL = RX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rxx_stats_ctl
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_octs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received good packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_ctl
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_ctl
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of received pause packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_dmac
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_dmac
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_octs_drp
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_octs_drp
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< Octet count of dropped packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts
+ *
+ * AGL_GMX_RX_STATS_PKTS
+ *
+ * Count of good received packets - packets that are not recognized as PAUSE
+ * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or
+ * have any other OPCODE (FCS, Length, etc).
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_pkts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received good packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_bad
+ *
+ * AGL_GMX_RX_STATS_PKTS_BAD
+ *
+ * Count of all packets received with some error that were not dropped
+ * either due to the dmac filter or lack of room in the receive FIFO.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_bad
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of bad packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_ctl
+ *
+ * AGL_GMX_RX_STATS_PKTS_CTL
+ *
+ * Count of all packets received that were recognized as Flow Control or
+ * PAUSE packets. PAUSE packets with any kind of error are counted in
+ * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
+ * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
+ * increments regardless of whether the packet is dropped. Pause packets
+ * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due to the dmac
+ * filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_ctl
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of received pause packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_dmac
+ *
+ * AGL_GMX_RX_STATS_PKTS_DMAC
+ *
+ * Count of all packets received that were dropped by the dmac filter.
+ * Packets that match the DMAC will be dropped and counted here regardless
+ * of if they were bad packets. These packets will never be counted in
+ * AGL_GMX_RX_STATS_PKTS.
+ *
+ * Some packets that were not able to satisfy the DECISION_CNT may not
+ * actually be dropped by Octeon, but they will be counted here as if they
+ * were dropped.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_dmac
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of filtered dmac packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
+
+/**
+ * cvmx_agl_gmx_rx#_stats_pkts_drp
+ *
+ * AGL_GMX_RX_STATS_PKTS_DRP
+ *
+ * Count of all packets received that were dropped due to a full receive
+ * FIFO. This counts good and bad packets received - all packets dropped by
+ * the FIFO. It does not count packets dropped by the dmac or pause packet
+ * filters.
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_stats_pkts_drp
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< Count of dropped packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;
+
+/**
+ * cvmx_agl_gmx_rx#_udd_skp
+ *
+ * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
+ *
+ *
+ * Notes:
+ * (1) The skip bytes are part of the packet and will be sent down the NCB
+ * packet interface and will be handled by PKI.
+ *
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field - if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame - even
+ * before UDD bytes. In most cases, there will be no preamble in these
+ * cases since it will be MII to MII communication without a PHY
+ * involved.
+ *
+ * (4) We can still do address filtering and control packet filtering if the
+ * user desires.
+ *
+ * (5) UDD_SKP must be 0 in half-duplex operation unless
+ * AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
+ * then UDD_SKP will normally be 8.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ *
+ * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rxx_udd_skp
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rxx_udd_skp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
+ 0 = all skip bytes are included in FCS
+ 1 = the skip bytes are not included in FCS */
+ uint64_t reserved_7_7 : 1;
+ uint64_t len : 7; /**< Amount of User-defined data before the start of
+ the L2 data. Zero means L2 comes first.
+ Max value is 64. */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t len : 7;
+ uint64_t reserved_7_7 : 1;
+ uint64_t fcssel : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;
+
+/**
+ * cvmx_agl_gmx_rx_bp_drop#
+ *
+ * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_dropx
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rx_bp_dropx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
+ When the FIFO exceeds this count, packets will
+ be dropped and not buffered.
+ MARK should typically be programmed to 2.
+ Failure to program correctly can lead to system
+ instability. */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;
+
+/**
+ * cvmx_agl_gmx_rx_bp_off#
+ *
+ * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_offx
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rx_bp_offx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t mark : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t;
+
+/**
+ * cvmx_agl_gmx_rx_bp_on#
+ *
+ * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_rx_bp_onx
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rx_bp_onx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t mark : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;
+
+/**
+ * cvmx_agl_gmx_rx_prt_info
+ *
+ * AGL_GMX_RX_PRT_INFO = state information for the ports
+ *
+ *
+ * Notes:
+ * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rx_prt_info
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rx_prt_info_s /* Common layout: 2-bit per-port fields */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t drop : 2; /**< Port indication that data was dropped */
+ uint64_t reserved_2_15 : 14;
+ uint64_t commit : 2; /**< Port indication that SOP was accepted */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t commit : 2;
+ uint64_t reserved_2_15 : 14;
+ uint64_t drop : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx /* cn56xx layout: single-bit COMMIT/DROP (one port) */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t drop : 1; /**< Port indication that data was dropped */
+ uint64_t reserved_1_15 : 15;
+ uint64_t commit : 1; /**< Port indication that SOP was accepted */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t commit : 1;
+ uint64_t reserved_1_15 : 15;
+ uint64_t drop : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1; /* same layout as cn56xx */
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;
+
+/**
+ * cvmx_agl_gmx_rx_tx_status
+ *
+ * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
+ *
+ *
+ * Notes:
+ * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_rx_tx_status
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_rx_tx_status_s /* Common layout: 2-bit per-port fields */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t tx : 2; /**< Transmit data since last read */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rx : 2; /**< Receive data since last read */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t rx : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t tx : 2;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx /* cn56xx layout: single-bit RX/TX (one port) */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t tx : 1; /**< Transmit data since last read */
+ uint64_t reserved_1_3 : 3;
+ uint64_t rx : 1; /**< Receive data since last read */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t rx : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t tx : 1;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1; /* same layout as cn56xx */
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;
+
+/**
+ * cvmx_agl_gmx_smac#
+ *
+ * AGL_GMX_SMAC = Packet SMAC
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_smacx
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_smacx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t smac : 48; /**< The SMAC field is used for generating and
+ accepting Control Pause packets */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t smac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_smacx_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_smacx_s cn52xxp1;
+ struct cvmx_agl_gmx_smacx_s cn56xx;
+ struct cvmx_agl_gmx_smacx_s cn56xxp1;
+ struct cvmx_agl_gmx_smacx_s cn63xx;
+ struct cvmx_agl_gmx_smacx_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;
+
+/**
+ * cvmx_agl_gmx_stat_bp
+ *
+ * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_stat_bp
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_stat_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t bp : 1; /**< Current BP state */
+ uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
+ Saturating counter */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t cnt : 16;
+ uint64_t bp : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } s;
+ struct cvmx_agl_gmx_stat_bp_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn56xx;
+ struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn63xx;
+ struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;
+
+/**
+ * cvmx_agl_gmx_tx#_append
+ *
+ * AGL_GMX_TX_APPEND = Packet TX Append Control
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_append
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_txx_append_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
+ when FCS is clear. Pause packets are normally
+ padded to 60 bytes. If
+ AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
+ FORCE_FCS will not be used. */
+ uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
+ uint64_t pad : 1; /**< Append PAD bytes such that min sized */
+ uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t preamble : 1;
+ uint64_t pad : 1;
+ uint64_t fcs : 1;
+ uint64_t force_fcs : 1;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_append_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_txx_append_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn56xx;
+ struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn63xx;
+ struct cvmx_agl_gmx_txx_append_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;
+
+/**
+ * cvmx_agl_gmx_tx#_clk
+ *
+ * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
+ *
+ *
+ * Notes:
+ * Normal Programming Values:
+ * (1) RGMII, 1000Mbs (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
+ * (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
+ * (3) MII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
+ *
+ * RGMII Example:
+ * Given a 125MHz PLL reference clock...
+ * CLK_CNT == 1 ==> 125.0MHz TXC clock period (8ns* 1)
+ * CLK_CNT == 5 ==> 25.0MHz TXC clock period (8ns* 5)
+ * CLK_CNT == 50 ==> 2.5MHz TXC clock period (8ns*50)
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_clk
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_txx_clk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency | NS
+ TXC(period) =
+ rgm_ref_clk(period)*CLK_CNT */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t clk_cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_clk_s cn63xx; /* Only defined for cn63xx parts */
+ struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;
+
+/**
+ * cvmx_agl_gmx_tx#_ctl
+ *
+ * AGL_GMX_TX_CTL = TX Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_ctl
+{
+ uint64_t u64; /* Raw 64-bit register value */
+ struct cvmx_agl_gmx_txx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
+ and interrupts */
+ uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
+ and interrupts */
+#else
+ /* Little-endian: same fields in reverse bit order */
+ uint64_t xscol_en : 1;
+ uint64_t xsdef_en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xx; /* All chips share the common layout */
+ struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;
+
+/**
+ * cvmx_agl_gmx_tx#_min_pkt
+ *
+ * AGL_GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size)
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_min_pkt
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_min_pkt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
+ Padding is only appended when
+ AGL_GMX_TX_APPEND[PAD] for the corresponding packet
+ port is set. Packets will be padded to
+ MIN_SIZE+1 The reset value will pad to 60 bytes. */
+#else
+ uint64_t min_size : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_interval
+ *
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_pause_pkt_interval
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
+ bit-times.
+ Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
+ INTERVAL=0, will only send a single PAUSE packet
+ for each backpressure event */
+#else
+ uint64_t interval : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_pkt_time
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
+ *
+ *
+ * Notes:
+ * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
+ * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer. It is suggested that TIME be much greater than INTERVAL and
+ * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisfy the
+ * following rule...
+ *
+ * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is the largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ *
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_pause_pkt_time
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< The pause_time field placed in outbnd pause pkts
+ pause_time is in 512 bit-times
+ Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_togo
+ *
+ * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_pause_togo
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_togo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Amount of time remaining to backpressure */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;
+
+/**
+ * cvmx_agl_gmx_tx#_pause_zero
+ *
+ * AGL_GMX_TX_PAUSE_ZERO = Packet TX Send PAUSE-zero enable (header was a copy-paste of PAUSE_TOGO; see SEND field below)
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_pause_zero
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_zero_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< When backpressure condition clear, send PAUSE
+ packet with pause_time of zero to enable the
+ channel */
+#else
+ uint64_t send : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;
+
+/**
+ * cvmx_agl_gmx_tx#_soft_pause
+ *
+ * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_soft_pause
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_soft_pause_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times
+ for full-duplex operation only */
+#else
+ uint64_t time : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat0
+ *
+ * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat0
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive deferral */
+ uint64_t xscol : 32; /**< Number of packets dropped (never successfully
+ sent) due to excessive collision. Defined by
+ AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t xscol : 32;
+ uint64_t xsdef : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat1
+ *
+ * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat1
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t scol : 32; /**< Number of packets sent with a single collision */
+ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
+ but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
+#else
+ uint64_t mcol : 32;
+ uint64_t scol : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat2
+ *
+ * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
+ *
+ *
+ * Notes:
+ * - Octet counts are the sum of all data transmitted on the wire including
+ * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet
+ * counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat2
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t octs : 48; /**< Number of total octets sent on the interface.
+ Does not count octets from frames that were
+ truncated due to collisions in halfdup mode. */
+#else
+ uint64_t octs : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat3
+ *
+ * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat3
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t pkts : 32; /**< Number of total frames sent on the interface.
+ Does not count frames that were truncated due to
+ collisions in halfdup mode. */
+#else
+ uint64_t pkts : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat4
+ *
+ * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat4
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
+ uint64_t hist0 : 32; /**< Number of packets sent with an octet count
+ of < 64. */
+#else
+ uint64_t hist0 : 32;
+ uint64_t hist1 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat5
+ *
+ * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat5
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
+ 128 - 255. */
+ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
+ 65 - 127. */
+#else
+ uint64_t hist2 : 32;
+ uint64_t hist3 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat6
+ *
+ * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat6
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
+ 512 - 1023. */
+ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
+ 256 - 511. */
+#else
+ uint64_t hist4 : 32;
+ uint64_t hist5 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat7
+ *
+ * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (1024-1518) / AGL_GMX_TX_STATS_HIST6 (>1518)
+ *
+ *
+ * Notes:
+ * - Packet length is the sum of all data transmitted on the wire for the given
+ * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
+ * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat7
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t hist7 : 32; /**< Number of packets sent with an octet count
+ of > 1518. */
+ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
+ 1024 - 1518. */
+#else
+ uint64_t hist6 : 32;
+ uint64_t hist7 : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat8
+ *
+ * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
+ * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
+ * as per the 802.3 frame definition. If the system requires additional data
+ * before the L2 header, then the MCST and BCST counters may not reflect
+ * reality and should be ignored by software.
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat8
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat8_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
+ Does not include BCST packets. */
+ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
+ Does not include MCST packets. */
+#else
+ uint64_t bcst : 32;
+ uint64_t mcst : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stat9
+ *
+ * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
+ *
+ *
+ * Notes:
+ * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
+ * - Counters will wrap
+ * - Not reset when MIX*_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_txx_stat9
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat9_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t undflw : 32; /**< Number of underflow packets */
+ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
+ generated by GMX. It does not include control
+ packets forwarded or generated by the PP's. */
+#else
+ uint64_t ctl : 32;
+ uint64_t undflw : 32;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;
+
+/**
+ * cvmx_agl_gmx_tx#_stats_ctl
+ *
+ * AGL_GMX_TX_STATS_CTL = TX Stats Control register
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_stats_ctl
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stats_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
+#else
+ uint64_t rd_clr : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;
+
+/**
+ * cvmx_agl_gmx_tx#_thresh
+ *
+ * AGL_GMX_TX_THRESH = Packet TX Threshold
+ *
+ *
+ * Notes:
+ * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
+ *
+ */
+union cvmx_agl_gmx_txx_thresh
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_thresh_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
+ before sending on the packet interface
+ This register should be large enough to prevent
+ underflow on the packet interface and must never
+ be set below 4. This register cannot exceed the
+ TX FIFO depth which is 128, 8B entries. */
+#else
+ uint64_t cnt : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;
+
+/**
+ * cvmx_agl_gmx_tx_bp
+ *
+ * AGL_GMX_TX_BP = Packet TX BackPressure Register
+ *
+ *
+ * Notes:
+ * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_tx_bp
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t bp : 2; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_bp_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t bp : 1; /**< Port BackPressure status
+ 0=Port is available
+ 1=Port should be back pressured */
+#else
+ uint64_t bp : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;
+
+/**
+ * cvmx_agl_gmx_tx_col_attempt
+ *
+ * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_col_attempt
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_col_attempt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t limit : 5; /**< Collision Attempts */
+#else
+ uint64_t limit : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;
+
+/**
+ * cvmx_agl_gmx_tx_ifg
+ *
+ * Common
+ *
+ *
+ * AGL_GMX_TX_IFG = Packet TX Interframe Gap
+ *
+ * Notes:
+ *
+ * * Programming IFG1 and IFG2.
+ *
+ * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-8, IFG2 must be in the range of 4-12, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
+ * be in the range of 1-11, IFG2 must be in the range of 1-11, and the
+ * IFG1+IFG2 sum must be 12.
+ *
+ * For all other systems, IFG1 and IFG2 can be any value in the range of
+ * 1-15. Allowing for a total possible IFG sum of 2-30.
+ *
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_ifg
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
+ If CRS is detected during IFG2, then the
+ interFrameSpacing timer is not reset and a frame
+ is transmitted once the timer expires. */
+ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
+ If CRS is detected during IFG1, then the
+ interFrameSpacing timer is reset and a frame is
+ not transmitted. */
+#else
+ uint64_t ifg1 : 4;
+ uint64_t ifg2 : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;
+
+/**
+ * cvmx_agl_gmx_tx_int_en
+ *
+ * AGL_GMX_TX_INT_EN = Interrupt Enable
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_int_en
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_22_63 : 42;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;
+
+/**
+ * cvmx_agl_gmx_tx_int_reg
+ *
+ * AGL_GMX_TX_INT_REG = Interrupt Register
+ *
+ *
+ * Notes:
+ * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
+ */
+union cvmx_agl_gmx_tx_int_reg
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_22_63 : 42;
+ uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
+ sent due to XSCOL */
+ uint64_t reserved_18_19 : 2;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_19 : 2;
+ uint64_t ptp_lost : 2;
+ uint64_t reserved_22_63 : 42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_18_63 : 46;
+ uint64_t late_col : 2; /**< TX Late Collision */
+ uint64_t reserved_14_15 : 2;
+ uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_4_7 : 4;
+ uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 2;
+ uint64_t reserved_4_7 : 4;
+ uint64_t xscol : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t xsdef : 2;
+ uint64_t reserved_14_15 : 2;
+ uint64_t late_col : 2;
+ uint64_t reserved_18_63 : 46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t late_col : 1; /**< TX Late Collision */
+ uint64_t reserved_13_15 : 3;
+ uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
+ uint64_t reserved_9_11 : 3;
+ uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
+ uint64_t reserved_3_7 : 5;
+ uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
+ uint64_t reserved_1_1 : 1;
+ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
+#else
+ uint64_t pko_nxa : 1;
+ uint64_t reserved_1_1 : 1;
+ uint64_t undflw : 1;
+ uint64_t reserved_3_7 : 5;
+ uint64_t xscol : 1;
+ uint64_t reserved_9_11 : 3;
+ uint64_t xsdef : 1;
+ uint64_t reserved_13_15 : 3;
+ uint64_t late_col : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;
+
+/**
+ * cvmx_agl_gmx_tx_jam
+ *
+ * AGL_GMX_TX_JAM = Packet TX Jam Pattern
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_jam
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_jam_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t jam : 8; /**< Jam pattern */
+#else
+ uint64_t jam : 8;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ /* Layout is identical on every supported chip; the per-chip members
+ below all alias the common "s" view. */
+ struct cvmx_agl_gmx_tx_jam_s cn52xx;
+ struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn56xx;
+ struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn63xx;
+ struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;
+
+/**
+ * cvmx_agl_gmx_tx_lfsr
+ *
+ * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_lfsr
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_lfsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
+ numbers to compute truncated binary exponential
+ backoff. */
+#else
+ uint64_t lfsr : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ /* Layout is identical on every supported chip; all per-chip members
+ alias the common "s" view. */
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;
+
+/**
+ * cvmx_agl_gmx_tx_ovr_bp
+ *
+ * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
+ *
+ *
+ * Notes:
+ * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
+ * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_gmx_tx_ovr_bp
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ovr_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_10_63 : 54;
+ uint64_t en : 2; /**< Per port Enable back pressure override */
+ uint64_t reserved_6_7 : 2;
+ uint64_t bp : 2; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 2;
+ uint64_t reserved_2_3 : 2;
+ uint64_t bp : 2;
+ uint64_t reserved_6_7 : 2;
+ uint64_t en : 2;
+ uint64_t reserved_10_63 : 54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
+ /* cn56xx exposes only a single AGL port, so each field narrows to
+ one bit; the field meanings match the common "s" view. */
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t en : 1; /**< Per port Enable back pressure override */
+ uint64_t reserved_5_7 : 3;
+ uint64_t bp : 1; /**< Port BackPressure status to use
+ 0=Port is available
+ 1=Port should be back pressured */
+ uint64_t reserved_1_3 : 3;
+ uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
+#else
+ uint64_t ign_full : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t bp : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t en : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_dmac
+ *
+ * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_pause_pkt_dmac
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< The DMAC field placed is outbnd pause pkts */
+#else
+ uint64_t dmac : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+ /* Layout is identical on every supported chip; all per-chip members
+ alias the common "s" view. */
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;
+
+/**
+ * cvmx_agl_gmx_tx_pause_pkt_type
+ *
+ * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
+ *
+ *
+ * Notes:
+ * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
+ *
+ */
+union cvmx_agl_gmx_tx_pause_pkt_type
+{
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t type : 16; /**< The TYPE field placed is outbnd pause pkts */
+#else
+ uint64_t type : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ /* Layout is identical on every supported chip; all per-chip members
+ alias the common "s" view. */
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
+};
+typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;
+
+/**
+ * cvmx_agl_prt#_ctl
+ *
+ * AGL_PRT_CTL = AGL Port Control
+ *
+ *
+ * Notes:
+ * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
+ * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
+ */
+union cvmx_agl_prtx_ctl
+{
+ uint64_t u64;
+ struct cvmx_agl_prtx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t drv_byp : 1; /**< Bypass the compensation controller and use
+ DRV_NCTL and DRV_PCTL
+ Note: the reset value was changed from pass1
+ to pass2. */
+ uint64_t reserved_62_62 : 1;
+ uint64_t cmp_pctl : 6; /**< PCTL drive strength from the compensation ctl */
+ uint64_t reserved_54_55 : 2;
+ uint64_t cmp_nctl : 6; /**< NCTL drive strength from the compensation ctl */
+ uint64_t reserved_46_47 : 2;
+ uint64_t drv_pctl : 6; /**< PCTL drive strength to use in bypass mode
+ Reset value of 19 is for 50 ohm termination */
+ uint64_t reserved_38_39 : 2;
+ uint64_t drv_nctl : 6; /**< NCTL drive strength to use in bypass mode
+ Reset value of 15 is for 50 ohm termination */
+ uint64_t reserved_29_31 : 3;
+ uint64_t clk_set : 5; /**< The clock delay as determined by the DLL */
+ uint64_t clkrx_byp : 1; /**< Bypass the RX clock delay setting
+ Skews RXC from RXD,RXCTL in RGMII mode
+ By default, HW internally shifts the RXC clock
+ to sample RXD,RXCTL assuming clock and data and
+ sourced synchronously from the link partner.
+ In MII mode, the CLKRX_BYP is forced to 1. */
+ uint64_t reserved_21_22 : 2;
+ uint64_t clkrx_set : 5; /**< RX clock delay setting to use in bypass mode
+ Skews RXC from RXD in RGMII mode */
+ uint64_t clktx_byp : 1; /**< Bypass the TX clock delay setting
+ Skews TXC from TXD,TXCTL in RGMII mode
+ Skews RXC from RXD,RXCTL in RGMII mode
+ By default, clock and data and sourced
+ synchronously.
+ In MII mode, the CLKRX_BYP is forced to 1. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t clktx_set : 5; /**< TX clock delay setting to use in bypass mode
+ Skews TXC from TXD in RGMII mode */
+ uint64_t reserved_5_7 : 3;
+ uint64_t dllrst : 1; /**< DLL Reset */
+ uint64_t comp : 1; /**< Compensation Enable */
+ uint64_t enable : 1; /**< Port Enable
+ Note: the reset value was changed from pass1
+ to pass2. */
+ uint64_t clkrst : 1; /**< Clock Tree Reset */
+ uint64_t mode : 1; /**< Port Mode
+ MODE must be set the same for all ports in which
+ AGL_PRTx_CTL[ENABLE] is set.
+ 0=RGMII
+ 1=MII */
+#else
+ uint64_t mode : 1;
+ uint64_t clkrst : 1;
+ uint64_t enable : 1;
+ uint64_t comp : 1;
+ uint64_t dllrst : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t clktx_set : 5;
+ uint64_t reserved_13_14 : 2;
+ uint64_t clktx_byp : 1;
+ uint64_t clkrx_set : 5;
+ uint64_t reserved_21_22 : 2;
+ uint64_t clkrx_byp : 1;
+ uint64_t clk_set : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t drv_nctl : 6;
+ uint64_t reserved_38_39 : 2;
+ uint64_t drv_pctl : 6;
+ uint64_t reserved_46_47 : 2;
+ uint64_t cmp_nctl : 6;
+ uint64_t reserved_54_55 : 2;
+ uint64_t cmp_pctl : 6;
+ uint64_t reserved_62_62 : 1;
+ uint64_t drv_byp : 1;
+#endif
+ } s;
+ /* This register exists only on cn63xx-family parts; both aliases use
+ the common "s" layout. */
+ struct cvmx_agl_prtx_ctl_s cn63xx;
+ struct cvmx_agl_prtx_ctl_s cn63xxp1;
+};
+typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;
+
+#endif
diff --git a/cvmx-app-hotplug.c b/cvmx-app-hotplug.c
new file mode 100644
index 000000000000..6145134b86bc
--- /dev/null
+++ b/cvmx-app-hotplug.c
@@ -0,0 +1,402 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Provides APIs for applications to register for hotplug. It also provides
+ * APIs for requesting shutdown of a running target application.
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#include "cvmx-app-hotplug.h"
+#include "cvmx-spinlock.h"
+
+//#define DEBUG 1
+
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+
+static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_sync_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
+static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
+static CVMX_SHARED cvmx_app_hotplug_info_t *cvmx_app_hotplug_info_ptr = NULL;
+
+static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32], void *user_arg);
+static void __cvmx_app_hotplug_sync(void);
+static void __cvmx_app_hotplug_reset(void);
+
+/**
+ * This routine registers an application for hotplug. It installs a handler for
+ * any incoming shutdown request. It also registers a callback routine from the
+ * application. This callback is invoked when the application receives a
+ * shutdown notification.
+ *
+ * This routine only needs to be called once per application.
+ *
+ * @param fn Callback routine from the application.
+ * @param arg Argument to the application callback routine.
+ * @return Return 0 on success, -1 on failure
+ *
+ */
+int cvmx_app_hotplug_register(void(*fn)(void*), void* arg)
+{
+ /* Find the list of applications launched by bootoct utility. */
+
+ if (!(cvmx_app_hotplug_info_ptr = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask)))
+ {
+ /* Application not launched by bootoct? */
+ printf("ERROR: cvmx_app_hotplug_register() failed\n");
+ return -1;
+ }
+
+ /* Register the callback. Function and argument pointers are stored as
+ 64-bit integers (CAST64) so the shared info block has a fixed layout
+ regardless of the caller's pointer width. */
+ cvmx_app_hotplug_info_ptr->data = CAST64(arg);
+ cvmx_app_hotplug_info_ptr->shutdown_callback = CAST64(fn);
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
+ cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid);
+#endif
+
+ /* Install the mailbox interrupt handler that services shutdown
+ requests; the IRQ is only unmasked later, by
+ cvmx_app_hotplug_activate(). */
+ cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
+
+ return 0;
+}
+
+/**
+ * Activate the current application core for receiving hotplug shutdown requests.
+ *
+ * This routine makes sure that each core belonging to the application is enabled
+ * to receive the shutdown notification and also provides a barrier sync to make
+ * sure that all cores are ready.
+ */
+int cvmx_app_hotplug_activate(void)
+{
+ /* Make sure all application cores are activating */
+ __cvmx_app_hotplug_sync();
+
+ /* Serialize updates to the shared hotplug info block. */
+ cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
+
+ if (!cvmx_app_hotplug_info_ptr)
+ {
+ /* cvmx_app_hotplug_register() was never (successfully) called. */
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
+ printf("ERROR: This application is not registered for hotplug\n");
+ return -1;
+ }
+
+ /* Enable the interrupt before we mark the core as activated, so a
+ requester that sees this core's bit set can rely on the mailbox IRQ
+ already being unmasked. */
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+
+ cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1<<cvmx_get_core_num());
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %d\n",
+ cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid,
+ sizeof(*cvmx_app_hotplug_info_ptr));
+#endif
+
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
+
+ return 0;
+}
+
+/**
+ * This routine is only required if cvmx_app_hotplug_shutdown_request() was called
+ * with wait=0. This routine waits for the application shutdown to complete.
+ *
+ * @param coremask Coremask the application is running on.
+ * @return 0 on success, -1 on error
+ *
+ */
+int cvmx_app_hotplug_shutdown_complete(uint32_t coremask)
+{
+ cvmx_app_hotplug_info_t *hotplug_info_ptr;
+
+ if (!(hotplug_info_ptr = cvmx_app_hotplug_get_info(coremask)))
+ {
+ printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
+ return -1;
+ }
+
+ /* Busy-wait until the last application core raises shutdown_done
+ (the field is declared volatile, so the load is re-issued). */
+ while(!hotplug_info_ptr->shutdown_done);
+
+ /* Clean up the hotplug info region for this app */
+ bzero(hotplug_info_ptr, sizeof(*hotplug_info_ptr));
+
+ return 0;
+}
+
+/**
+ * Disable recognition of any incoming shutdown request.
+ */
+
+void cvmx_app_hotplug_shutdown_disable(void)
+{
+ /* Mask the mailbox IRQ so shutdown requests are deferred, not lost. */
+ cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
+}
+
+/**
+ * Re-enable recognition of incoming shutdown requests.
+ */
+
+void cvmx_app_hotplug_shutdown_enable(void)
+{
+ /* Unmask the mailbox IRQ; a pending shutdown request fires now. */
+ cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
+}
+
+/*
+ * ISR for the incoming shutdown request interrupt.
+ */
+static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32], void *user_arg)
+{
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ uint32_t flags;
+
+ /* Mask the IRQ first so the handler cannot be re-entered on this core. */
+ cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
+
+ /* Clear the interrupt */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 1);
+
+ /* Make sure the write above completes */
+ cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
+
+ if (!cvmx_app_hotplug_info_ptr)
+ {
+ printf("ERROR: Application is not registered for hotplug!\n");
+ return;
+ }
+
+ if (cvmx_app_hotplug_info_ptr->hotplug_activated_coremask != sys_info_ptr->core_mask)
+ {
+ printf("ERROR: Shutdown requested when not all app cores have activated hotplug\n"
+ "Application coremask: 0x%x Hotplug coremask: 0x%x\n", (unsigned int)sys_info_ptr->core_mask,
+ (unsigned int)cvmx_app_hotplug_info_ptr->hotplug_activated_coremask);
+ return;
+ }
+
+ /* Call the application's own callback function */
+ ((void(*)(void*))(long)cvmx_app_hotplug_info_ptr->shutdown_callback)(CASTPTR(void *, cvmx_app_hotplug_info_ptr->data));
+
+ /* Wait until every application core has finished its callback. */
+ __cvmx_app_hotplug_sync();
+
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
+ {
+ /* First core wipes the whole info block (freeing the slot), then
+ raises shutdown_done so the requester's wait loop terminates. */
+ bzero(cvmx_app_hotplug_info_ptr, sizeof(*cvmx_app_hotplug_info_ptr));
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_app_hotplug_shutdown(): setting shutdown done! \n");
+#endif
+ cvmx_app_hotplug_info_ptr->shutdown_done = 1;
+ }
+
+ /* Saved flags are intentionally never restored: the core is about to
+ park in __cvmx_app_hotplug_reset() and never resumes this context. */
+ flags = cvmx_interrupt_disable_save();
+
+ __cvmx_app_hotplug_sync();
+
+ /* Reset the core */
+ __cvmx_app_hotplug_reset();
+}
+
+/*
+ * Reset the core. We just jump back to the reset vector for now.
+ */
+void __cvmx_app_hotplug_reset(void)
+{
+ /* Code from SecondaryCoreLoop from bootloader, sleep until we receive
+ a NMI. The loop executes "wait" and branches back to it forever;
+ this function never returns. */
+ __asm__ volatile (
+ ".set noreorder \n"
+ "\tsync \n"
+ "\tnop \n"
+ "1:\twait \n"
+ "\tb 1b \n"
+ "\tnop \n"
+ ".set reorder \n"
+ ::
+ );
+}
+
+/*
+ * We need a separate sync operation from cvmx_coremask_barrier_sync() to
+ * avoid a deadlock on state.lock, since the application itself maybe doing a
+ * cvmx_coremask_barrier_sync().
+ */
+static void __cvmx_app_hotplug_sync(void)
+{
+ /* NOTE(review): sync_coremask is never cleared, so only the first
+ barrier actually blocks; once it equals core_mask every later call
+ falls straight through — confirm this one-shot behavior is intended. */
+ static CVMX_SHARED volatile uint32_t sync_coremask = 0;
+ cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+
+ /* The lock makes the read-modify-write of the shared mask atomic. */
+ cvmx_spinlock_lock(&cvmx_app_hotplug_sync_lock);
+
+ sync_coremask |= cvmx_coremask_core(cvmx_get_core_num());
+
+ cvmx_spinlock_unlock(&cvmx_app_hotplug_sync_lock);
+
+ /* Spin until every core in the application has checked in. */
+ while (sync_coremask != sys_info_ptr->core_mask);
+}
+
+#endif /* CVMX_BUILD_FOR_LINUX_USER */
+
+/**
+ * Return the hotplug info structure (cvmx_app_hotplug_info_t) pointer for the
+ * application running on the given coremask.
+ *
+ * @param coremask Coremask of application.
+ * @return Returns hotplug info struct on success, NULL on failure
+ *
+ */
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t coremask)
+{
+ const struct cvmx_bootmem_named_block_desc *block_desc;
+ cvmx_app_hotplug_info_t *hip;
+ cvmx_app_hotplug_global_t *hgp;
+ int i;
+
+ block_desc = cvmx_bootmem_find_named_block(CVMX_APP_HOTPLUG_INFO_REGION_NAME);
+
+ if (!block_desc)
+ {
+ printf("ERROR: Hotplug info region is not setup\n");
+ return NULL;
+ }
+
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ {
+ size_t pg_sz = sysconf(_SC_PAGESIZE), size;
+ off_t offset;
+ char *vaddr;
+ int fd;
+
+ if ((fd = open("/dev/mem", O_RDWR)) == -1) {
+ perror("open");
+ return NULL;
+ }
+
+ /*
+ * We need to mmap() this memory, since this was allocated from the
+ * kernel bootup code and does not reside in the RESERVE32 region.
+ * The mapping must be page-aligned, so map from the start of the
+ * containing page and add the in-page offset back afterwards.
+ */
+ size = CVMX_APP_HOTPLUG_INFO_REGION_SIZE + pg_sz-1;
+ offset = block_desc->base_addr & ~(pg_sz-1);
+ if ((vaddr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset)) == MAP_FAILED)
+ {
+ perror("mmap");
+ close(fd); /* don't leak the descriptor on failure */
+ return NULL;
+ }
+
+ /* POSIX keeps the mapping valid after the descriptor is closed,
+ so release it now instead of leaking one fd per call. */
+ close(fd);
+
+ hgp = (cvmx_app_hotplug_global_t *)(vaddr + ( block_desc->base_addr & (pg_sz-1)));
+ }
+#else
+ hgp = cvmx_phys_to_ptr(block_desc->base_addr);
+#endif
+
+ hip = hgp->hotplug_info_array;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_app_hotplug_get_info(): hotplug_info phy addr 0x%llx ptr %p\n",
+ block_desc->base_addr, hgp);
+#endif
+
+ /* Look for the current app's info */
+
+ for (i=0; i<CVMX_APP_HOTPLUG_MAX_APPS; i++)
+ {
+ if (hip[i].coremask == coremask)
+ {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_app_hotplug_get_info(): coremask match %d -- coremask 0x%x valid %d\n",
+ i, hip[i].coremask, hip[i].valid);
+#endif
+
+ return &hip[i];
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * This routine sends a shutdown request to a running target application.
+ *
+ * @param coremask Coremask the application is running on.
+ * @param wait 1 - Wait for shutdown completion
+ * 0 - Do not wait
+ * @return 0 on success, -1 on error
+ *
+ */
+
+int cvmx_app_hotplug_shutdown_request(uint32_t coremask, int wait)
+{
+ int i;
+ cvmx_app_hotplug_info_t *hotplug_info_ptr;
+
+ if (!(hotplug_info_ptr = cvmx_app_hotplug_get_info(coremask)))
+ {
+ printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
+ return -1;
+ }
+
+ if (!hotplug_info_ptr->shutdown_callback)
+ {
+ printf("\nERROR: Target application has not registered for hotplug!\n");
+ return -1;
+ }
+
+ /* Every core of the target must have called cvmx_app_hotplug_activate()
+ before a shutdown can be delivered safely. */
+ if (hotplug_info_ptr->hotplug_activated_coremask != coremask)
+ {
+ printf("\nERROR: Not all application cores have activated hotplug\n");
+ return -1;
+ }
+
+ /* Send IPIs to all application cores to request shutdown. Use an
+ unsigned constant so the shift is well defined for core 31
+ (1<<31 on a signed int is undefined behavior). */
+ for (i=0; i<CVMX_MAX_CORES; i++) {
+ if (coremask & (1u<<i))
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 1);
+ }
+
+ if (wait)
+ {
+ /* Busy-wait until the target's first core raises shutdown_done
+ (the field is volatile in the shared info block). */
+ while (!hotplug_info_ptr->shutdown_done);
+
+ /* Clean up the hotplug info region for this application */
+ bzero(hotplug_info_ptr, sizeof(*hotplug_info_ptr));
+ }
+
+ return 0;
+}
diff --git a/cvmx-app-hotplug.h b/cvmx-app-hotplug.h
new file mode 100644
index 000000000000..bfa62f8c35a8
--- /dev/null
+++ b/cvmx-app-hotplug.h
@@ -0,0 +1,103 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Header file for the hotplug APIs
+ *
+ * <hr>$Revision: $<hr>
+ */
+
+#ifndef __CVMX_APP_HOTPLUG_H__
+#define __CVMX_APP_HOTPLUG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#else
+#include "cvmx.h"
+#include "cvmx-coremask.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-bootmem.h"
+#endif
+
+#define CVMX_APP_HOTPLUG_MAX_APPS 32
+#define CVMX_APP_HOTPLUG_MAX_APPNAME_LEN 256
+
+typedef struct cvmx_app_hotplug_info
+{
+ char app_name[CVMX_APP_HOTPLUG_MAX_APPNAME_LEN]; /**< Name of the application */
+ uint32_t coremask; /**< Cores the application runs on; used as the lookup key */
+ uint32_t volatile hotplug_activated_coremask; /**< Cores that have called cvmx_app_hotplug_activate() */
+ int32_t valid; /**< Entry-in-use flag (presumably set by the launcher — confirm) */
+ int32_t volatile shutdown_done; /**< Raised by the app's first core when shutdown completes */
+ uint64_t shutdown_callback; /**< Application callback, stored via CAST64 of void(*)(void*) */
+ uint64_t data; /**< Argument passed to the callback, stored via CAST64 */
+} cvmx_app_hotplug_info_t;
+
+struct cvmx_app_hotplug_global
+{
+ uint32_t avail_coremask; /**< Cores not currently claimed by any application */
+ /* One slot per running application, matched by coremask in
+ cvmx_app_hotplug_get_info(). */
+ cvmx_app_hotplug_info_t hotplug_info_array[CVMX_APP_HOTPLUG_MAX_APPS];
+};
+
+typedef struct cvmx_app_hotplug_global cvmx_app_hotplug_global_t;
+
+int cvmx_app_hotplug_shutdown_request(uint32_t, int);
+cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t);
+#ifndef CVMX_BUILD_FOR_LINUX_USER
+int cvmx_app_hotplug_register(void(*)(void*), void*);
+int cvmx_app_hotplug_activate(void);
+void cvmx_app_hotplug_shutdown_disable(void);
+void cvmx_app_hotplug_shutdown_enable(void);
+#endif
+
+#define CVMX_APP_HOTPLUG_INFO_REGION_SIZE sizeof(cvmx_app_hotplug_global_t)
+#define CVMX_APP_HOTPLUG_INFO_REGION_NAME "cvmx-app-hotplug-block"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_APP_HOTPLUG_H__ */
diff --git a/cvmx-app-init-linux.c b/cvmx-app-init-linux.c
index ed83b50dbe78..73726df953e0 100644
--- a/cvmx-app-init-linux.c
+++ b/cvmx-app-init-linux.c
@@ -1,45 +1,47 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Simple executive application initialization for Linux user space. This
@@ -59,7 +61,7 @@
* -# Most hardware can only be initialized once. Unless you're very careful,
* this also means you Linux application can only run once.
*
- * <hr>$Revision: 41757 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*
*/
#define _GNU_SOURCE
@@ -107,11 +109,6 @@ extern uint64_t linux_mem32_max;
extern uint64_t linux_mem32_wired;
extern uint64_t linux_mem32_offset;
-#define MIPS_CAVIUM_XKPHYS_READ 2010 /* XKPHYS */
-#define MIPS_CAVIUM_XKPHYS_WRITE 2011 /* XKPHYS */
-
-static CVMX_SHARED int32_t warn_count;
-
/**
* This function performs some default initialization of the Octeon executive. It initializes
* the cvmx_bootmem memory allocator with the list of physical memory shared by the bootloader.
@@ -134,17 +131,17 @@ int cvmx_user_app_init(void)
* library printf for output. It also makes sure that two
* calls to simprintf provide atomic output.
*
- * @param fmt Format string in the same format as printf.
+ * @param format Format string in the same format as printf.
*/
-void simprintf(const char *fmt, ...)
+void simprintf(const char *format, ...)
{
CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
va_list ap;
cvmx_spinlock_lock(&simprintf_lock);
printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num());
- va_start(ap, fmt);
- vprintf(fmt, ap);
+ va_start(ap, format);
+ vprintf(format, ap);
va_end(ap);
cvmx_spinlock_unlock(&simprintf_lock);
}
@@ -325,7 +322,10 @@ int main(int argc, const char *argv[])
CVMX_SHARED static int32_t pending_fork;
unsigned long cpumask;
unsigned long cpu;
- int lastcpu = 0;
+ int firstcpu = 0;
+ int firstcore = 0;
+
+ cvmx_linux_enable_xkphys_access(0);
cvmx_sysinfo_linux_userspace_initialize();
@@ -344,7 +344,7 @@ int main(int argc, const char *argv[])
}
setup_cvmx_shared();
- cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_ptr);
+ cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_addr);
/* Check to make sure the Chip version matches the configured version */
octeon_model_version_check(cvmx_get_proc_id());
@@ -359,37 +359,40 @@ int main(int argc, const char *argv[])
cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
cvmx_atomic_set32(&pending_fork, 1);
- for (cpu=0; cpu<16; cpu++)
+
+ /* Get the lowest logical cpu */
+ firstcore = ffsl(cpumask) - 1;
+ cpumask ^= (1<<(firstcore));
+ while (1)
{
- if (cpumask & (1<<cpu))
+ if (cpumask == 0)
{
- /* Turn off the bit for this CPU number. We've counted him */
- cpumask ^= (1<<cpu);
- /* If this is the last CPU to run on, use this process instead of forking another one */
- if (cpumask == 0)
- {
- lastcpu = 1;
- break;
- }
- /* Increment the number of CPUs running this app */
- cvmx_atomic_add32(&pending_fork, 1);
- /* Flush all IO streams before the fork. Otherwise any buffered
- data in the C library will be duplicated. This results in
- duplicate output from a single print */
- fflush(NULL);
- /* Fork a process for the new CPU */
- int pid = fork();
- if (pid == 0)
- {
- break;
- }
- else if (pid == -1)
- {
- perror("Fork failed");
- exit(errno);
- }
+ cpu = firstcore;
+ firstcpu = 1;
+ break;
}
- }
+ cpu = ffsl(cpumask) - 1;
+ /* Turn off the bit for this CPU number. We've counted him */
+ cpumask ^= (1<<cpu);
+ /* Increment the number of CPUs running this app */
+ cvmx_atomic_add32(&pending_fork, 1);
+ /* Flush all IO streams before the fork. Otherwise any buffered
+ data in the C library will be duplicated. This results in
+ duplicate output from a single print */
+ fflush(NULL);
+ /* Fork a process for the new CPU */
+ int pid = fork();
+ if (pid == 0)
+ {
+ break;
+ }
+ else if (pid == -1)
+ {
+ perror("Fork failed");
+ exit(errno);
+ }
+ }
+
/* Set affinity to lock me to the correct CPU */
cpumask = (1<<cpu);
@@ -404,7 +407,7 @@ int main(int argc, const char *argv[])
cvmx_atomic_add32(&pending_fork, -1);
if (cvmx_atomic_get32(&pending_fork) == 0)
cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask);
- if (lastcpu)
+ if (firstcpu)
system_info->init_core = cvmx_get_core_num();
cvmx_spinlock_unlock(&mask_lock);
@@ -413,27 +416,7 @@ int main(int argc, const char *argv[])
cvmx_coremask_barrier_sync(system_info->core_mask);
- int ret = sysmips(MIPS_CAVIUM_XKPHYS_WRITE, getpid(), 3, 0);
- if (ret != 0) {
- int32_t w = cvmx_atomic_fetch_and_add32(&warn_count, 1);
- if (!w) {
- switch(errno) {
- case EINVAL:
- perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
- " Did you configure your kernel with both:\n"
- " CONFIG_CAVIUM_OCTEON_USER_MEM_PER_PROCESS *and*\n"
- " CONFIG_CAVIUM_OCTEON_USER_IO_PER_PROCESS?");
- break;
- case EPERM:
- perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
- " Are you running as root?");
- break;
- default:
- perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed");
- break;
- }
- }
- }
+ cvmx_linux_enable_xkphys_access(1);
int result = appmain(argc, argv);
diff --git a/cvmx-app-init.c b/cvmx-app-init.c
index 87692186a495..885e36ef8787 100644
--- a/cvmx-app-init.c
+++ b/cvmx-app-init.c
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,6 +42,7 @@
+
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
@@ -52,14 +54,15 @@
#include "cvmx-sysinfo.h"
#include "cvmx-bootmem.h"
#include "cvmx-uart.h"
-#include "cvmx-ciu.h"
#include "cvmx-coremask.h"
#include "cvmx-core.h"
#include "cvmx-interrupt.h"
#include "cvmx-ebt3000.h"
+#include "cvmx-sim-magic.h"
+#include "cvmx-debug.h"
#include "../../bootloader/u-boot/include/octeon_mem_map.h"
-int cvmx_debug_uart;
+int cvmx_debug_uart = -1;
/**
* @file
@@ -114,7 +117,7 @@ static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx
sys_info_ptr->stack_top = cvmx_bootinfo_ptr->stack_top;
sys_info_ptr->stack_size = cvmx_bootinfo_ptr->stack_size;
sys_info_ptr->init_core = cvmx_get_core_num();
- sys_info_ptr->phy_mem_desc_ptr = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_bootinfo_ptr->phy_mem_desc_addr));
+ sys_info_ptr->phy_mem_desc_addr = cvmx_bootinfo_ptr->phy_mem_desc_addr;
sys_info_ptr->exception_base_addr = cvmx_bootinfo_ptr->exception_base_addr;
sys_info_ptr->cpu_clock_hz = cvmx_bootinfo_ptr->eclock_hz;
sys_info_ptr->dram_data_rate_hz = cvmx_bootinfo_ptr->dclock_hz * 2;
@@ -129,7 +132,7 @@ static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx
if (cvmx_bootinfo_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
sys_info_ptr->console_uart_num = 1;
- if (cvmx_bootinfo_ptr->dram_size > 16*1024*1024)
+ if (cvmx_bootinfo_ptr->dram_size > 32*1024*1024)
sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size; /* older bootloaders incorrectly gave this in bytes, so don't convert */
else
sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size * 1024 * 1024; /* convert from Megabytes to bytes */
@@ -140,7 +143,8 @@ static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx
sys_info_ptr->led_display_base_addr = cvmx_bootinfo_ptr->led_display_base_addr;
}
else if (sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT3000 ||
- sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5800)
+ sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5800 ||
+ sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5810)
{
/* Default these variables so that users of structure can be the same no
** matter what version fo boot info block the bootloader passes */
@@ -178,48 +182,12 @@ static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx
{
printf("ERROR: Incompatible CVMX descriptor passed by bootloader: %d.%d\n",
(int)cvmx_bootinfo_ptr->major_version, (int)cvmx_bootinfo_ptr->minor_version);
- while (1);
+ exit(-1);
}
}
/**
- * Interrupt handler for debugger Control-C interrupts.
- *
- * @param irq_number IRQ interrupt number
- * @param registers CPU registers at the time of the interrupt
- * @param user_arg Unused user argument
- */
-static void process_debug_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
-{
- int uart = irq_number - CVMX_IRQ_UART0;
- cvmx_uart_lsr_t lsrval;
-
- /* Check for a Control-C interrupt from the debugger. This loop will eat
- all input received on the uart */
- lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
- while (lsrval.s.dr)
- {
- int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart));
- if (c == '\003')
- {
- register uint64_t tmp;
- fflush(stderr);
- fflush(stdout);
- /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
- set the MCD0 to be not masked by this core so we know
- the signal is received by someone */
- asm volatile (
- "dmfc0 %0, $22\n"
- "ori %0, %0, 0x1110\n"
- "dmtc0 %0, $22\n"
- : "=r" (tmp));
- }
- lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
- }
-}
-
-/**
* Interrupt handler for calling exit on Control-C interrupts.
*
* @param irq_number IRQ interrupt number
@@ -309,6 +277,7 @@ void __cvmx_app_init(uint64_t app_desc_addr)
/* app info structure used by the simple exec */
cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+ int breakflag = 0;
if (cvmx_coremask_first_core(app_desc_ptr->core_mask))
{
@@ -316,8 +285,7 @@ void __cvmx_app_init(uint64_t app_desc_addr)
if (app_desc_ptr->desc_version < 6)
{
printf("Obsolete bootloader, can't run application\n");
- while (1)
- ;
+ exit(-1);
}
else
{
@@ -332,95 +300,59 @@ void __cvmx_app_init(uint64_t app_desc_addr)
}
cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
- /* All cores need to enable MCD0 signals if the debugger flag is set */
- if (sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG)
+ breakflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_BREAK;
+
+ /* No need to initialize bootmem, interrupts, interrupt handler and error handler
+ if version does not match. */
+ if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
{
- /* Set all cores to stop on MCD0 signals */
- uint64_t tmp;
- asm volatile(
- "dmfc0 %0, $22, 0\n"
- "or %0, %0, 0x1100\n"
- "dmtc0 %0, $22, 0\n" : "=r" (tmp));
+ /* Check to make sure the Chip version matches the configured version */
+ uint32_t chip_id = cvmx_get_proc_id();
+ /* Make sure we can properly run on this chip */
+ octeon_model_version_check(chip_id);
}
cvmx_interrupt_initialize();
if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
{
- /* Check to make sure the Chip version matches the configured version */
- uint32_t chip_id = cvmx_get_proc_id();
- int debugflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG;
- int breakflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_BREAK;
- int uart;
+ int break_uart = 0;
+ unsigned int i;
/* Intialize the bootmem allocator with the descriptor that was provided by
- ** the bootloader
- ** IMPORTANT: All printfs must happen after this since PCI console uses named
- ** blocks.
- */
- cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_ptr);
-
- /* Make sure we can properly run on this chip */
- octeon_model_version_check(chip_id);
+ * the bootloader
+ * IMPORTANT: All printfs must happen after this since PCI console uses named
+ * blocks.
+ */
+ cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_addr);
+ if (breakflag && cvmx_debug_booted())
+ {
+ printf("ERROR: Using debug and break together is not supported.\n");
+ while (1)
+ ;
+ }
- /* Default to the second uart port. Set this even if debug was
- not passed. The idea is that if the program crashes one would
- be able to break in on uart1 even without debug. */
- cvmx_debug_uart = 1;
- /* If the debugger flag is set, setup the uart Control-C interrupt
- handler */
- if (debugflag)
+ /* Search through the arguments for a break=X or a debug=X. */
+ for (i = 0; i < app_desc_ptr->argc; i++)
{
- /* Search through the arguments for a debug=X */
- unsigned int i;
- for (i=0; i<app_desc_ptr->argc; i++)
- {
- const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
- if (strncmp(argv, "debug=", 6) == 0)
- {
- /* Use the supplied uart as an override */
- cvmx_debug_uart = atoi(argv+6);
- break;
- }
- }
- cvmx_interrupt_register(CVMX_IRQ_UART0+cvmx_debug_uart, process_debug_interrupt, NULL);
- uart = cvmx_debug_uart;
- }
- else if (breakflag)
- {
- unsigned int i;
- int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE));
- /* Default to the first uart port. */
- uart = 0;
-
- /* Search through the arguments for a break=X */
- for (i = 0; i < app_desc_ptr->argc; i++)
- {
- const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
- if (strncmp(argv, "break=", 6) == 0)
- {
- /* Use the supplied uart as an override */
- uart = atoi(argv+6);
- break;
- }
- }
+ const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
+ if (strncmp(argv, "break=", 6) == 0)
+ break_uart = atoi(argv + 6);
+ else if (strncmp(argv, "debug=", 6) == 0)
+ cvmx_debug_uart = atoi(argv + 6);
+ }
- /* On debug exception, call exit_on_break from all cores. */
- *trampoline = (int32_t)(long)&exit_on_break;
- cvmx_interrupt_register(CVMX_IRQ_UART0 + uart, process_break_interrupt, NULL);
- }
- if (debugflag || breakflag)
- {
- /* Enable uart interrupts for debugger Control-C processing */
- cvmx_uart_ier_t ier;
- ier.u64 = cvmx_read_csr(CVMX_MIO_UARTX_IER(uart));
- ier.s.erbfi = 1;
- cvmx_write_csr(CVMX_MIO_UARTX_IER(uart), ier.u64);
-
- cvmx_interrupt_unmask_irq(CVMX_IRQ_UART0+uart);
+ if (breakflag)
+ {
+ int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE));
+ /* On debug exception, call exit_on_break from all cores. */
+ *trampoline = (int32_t)(long)&exit_on_break;
+ cvmx_uart_enable_intr(break_uart, process_break_interrupt);
}
}
+ cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
/* Clear BEV now that we have installed exception handlers. */
uint64_t tmp;
asm volatile (
@@ -443,6 +375,11 @@ void __cvmx_app_init(uint64_t app_desc_addr)
"dmtc0 %0, $22, 0\n" : "=r" (tmp));
CVMX_SYNC;
+
+ /* Now initialize the debug exception handler as BEV is cleared. */
+ if (!breakflag)
+ cvmx_debug_init();
+
/* Synchronise all cores at this point */
cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
@@ -470,11 +407,6 @@ int cvmx_user_app_init(void)
printf("BIST FAILURE: COP0_CACHE_ERR: 0x%llx\n", (unsigned long long)bist_val);
bist_errors++;
}
- /* Clear parity error bits */
- CVMX_MF_CACHE_ERR(bist_val);
- bist_val &= ~0x7ull;
- CVMX_MT_CACHE_ERR(bist_val);
-
mask = 0xfc00000000000000ull;
CVMX_MF_CVM_MEM_CTL(bist_val);
@@ -485,29 +417,16 @@ int cvmx_user_app_init(void)
bist_errors++;
}
- /* Clear DCACHE parity error bit */
- bist_val = 0;
- CVMX_MF_DCACHE_ERR(bist_val);
-
- mask = 0x18ull;
- bist_val = cvmx_read_csr(CVMX_L2D_ERR);
- if (bist_val & mask)
- {
- printf("ERROR: ECC error detected in L2 Data, L2D_ERR: 0x%llx\n", (unsigned long long)bist_val);
- cvmx_write_csr(CVMX_L2D_ERR, bist_val); /* Clear error bits if set */
- }
- bist_val = cvmx_read_csr(CVMX_L2T_ERR);
- if (bist_val & mask)
- {
- printf("ERROR: ECC error detected in L2 Tags, L2T_ERR: 0x%llx\n", (unsigned long long)bist_val);
- cvmx_write_csr(CVMX_L2T_ERR, bist_val); /* Clear error bits if set */
- }
-
-
/* Set up 4 cache lines of local memory, make available from Kernel space */
CVMX_MF_CVM_MEM_CTL(tmp);
tmp &= ~0x1ffull;
tmp |= 0x104ull;
+ /* Set WBTHRESH=4 as per Core-14752 errata in cn63xxp1.X. */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ {
+ tmp &= ~(0xfull << 11);
+ tmp |= 4 << 11;
+ }
CVMX_MT_CVM_MEM_CTL(tmp);
@@ -519,11 +438,9 @@ int cvmx_user_app_init(void)
{
printf("ERROR: 1-1 TLB mappings configured and oversize application loaded.\n");
printf("ERROR: Either 1-1 TLB mappings must be disabled or application size reduced.\n");
- while (1)
- ;
+ exit(-1);
}
-
/* Create 1-1 Mappings for all DRAM up to 8 gigs, excluding the low 1 Megabyte. This area
** is reserved for the bootloader and exception vectors. By not mapping this area, NULL pointer
** dereferences will be caught with TLB exceptions. Exception handlers should be written
@@ -549,67 +466,62 @@ int cvmx_user_app_init(void)
#endif
cvmx_core_add_fixed_tlb_mapping(0x8000000ULL, 0x8000000ULL, 0xc000000ULL, CVMX_TLB_PAGEMASK_64M);
- /* Create 1-1 mapping for next 256 megs
- ** bottom page is not valid */
- cvmx_core_add_fixed_tlb_mapping_bits(0x400000000ULL, 0, 0x410000000ULL | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_256M);
-
- /* Map from 0.5 up to the installed memory size in 512 MByte chunks. If this loop runs out of memory,
- ** the NULL pointer detection can be disabled to free up more TLB entries. */
- if (cvmx_sysinfo_get()->system_dram_size > 0x20000000ULL)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
{
- for (base_addr = 0x20000000ULL; base_addr <= (cvmx_sysinfo_get()->system_dram_size - 0x20000000ULL); base_addr += 0x20000000ULL)
+ for (base_addr = 0x20000000ULL; base_addr < (cvmx_sysinfo_get()->system_dram_size + 0x10000000ULL); base_addr += 0x20000000ULL)
{
if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
{
printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
- while (1); /* Hang here, as expected memory mappings aren't set up if this fails */
+ /* Exit from here, as expected memory mappings aren't set
+ up if this fails */
+ exit(-1);
}
}
}
+ else
+ {
+ /* Create 1-1 mapping for next 256 megs
+ ** bottom page is not valid */
+ cvmx_core_add_fixed_tlb_mapping_bits(0x400000000ULL, 0, 0x410000000ULL | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_256M);
-
+ /* Map from 0.5 up to the installed memory size in 512 MByte chunks. If this loop runs out of memory,
+ ** the NULL pointer detection can be disabled to free up more TLB entries. */
+ if (cvmx_sysinfo_get()->system_dram_size > 0x20000000ULL)
+ {
+ for (base_addr = 0x20000000ULL; base_addr <= (cvmx_sysinfo_get()->system_dram_size - 0x20000000ULL); base_addr += 0x20000000ULL)
+ {
+ if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
+ {
+ printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
+ /* Exit from here, as expected memory mappings
+ aren't set up if this fails */
+ exit(-1);
+ }
+ }
+ }
+ }
#endif
cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
- cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_ptr);
+ cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_addr);
return(0);
}
void __cvmx_app_exit(void)
{
- if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
- {
- uint64_t val;
- uint64_t mask, expected;
- int bist_errors = 0;
-
- mask = 0x1ull;
- expected = 0x0ull;
- CVMX_MF_DCACHE_ERR(val);
- val = (val & mask) ^ expected;
- if (val)
- {
- printf("DCACHE Parity error: 0x%llx\n", (unsigned long long)val);
- bist_errors++;
- }
-
- mask = 0x18ull;
- expected = 0x0ull;
- val = cvmx_read_csr(CVMX_L2D_ERR);
- val = (val & mask) ^ expected;
- if (val)
- {
- printf("L2 Parity error: 0x%llx\n", (unsigned long long)val);
- bist_errors++;
- }
-
-
- while (1)
- ;
+ cvmx_debug_finish();
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ {
+ CVMX_BREAK;
}
+ /* Hang forever, until more appropriate stand alone simple executive
+ exit() is implemented */
+
+ while (1);
}
diff --git a/cvmx-app-init.h b/cvmx-app-init.h
index cfe65fb4dbbd..f3f01fd957ce 100644
--- a/cvmx-app-init.h
+++ b/cvmx-app-init.h
@@ -1,50 +1,52 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Header file for simple executive application initialization. This defines
* part of the ABI between the bootloader and the application.
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 52004 $<hr>
*
*/
@@ -73,8 +75,7 @@ extern "C" {
** must be incremented, and the minor version should be reset
** to 0.
*/
-typedef struct
-{
+struct cvmx_bootinfo {
uint32_t major_version;
uint32_t minor_version;
@@ -121,7 +122,9 @@ typedef struct
#endif
-} cvmx_bootinfo_t;
+};
+
+typedef struct cvmx_bootinfo cvmx_bootinfo_t;
#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
@@ -166,6 +169,15 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CB5601 = 26,
CVMX_BOARD_TYPE_CB5200 = 27,
CVMX_BOARD_TYPE_GENERIC = 28, /* Special 'generic' board type, supports many boards */
+ CVMX_BOARD_TYPE_EBH5610 = 29,
+ CVMX_BOARD_TYPE_LANAI2_A = 30,
+ CVMX_BOARD_TYPE_LANAI2_U = 31,
+ CVMX_BOARD_TYPE_EBB5600 = 32,
+ CVMX_BOARD_TYPE_EBB6300 = 33,
+ CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
+ CVMX_BOARD_TYPE_LANAI2_G = 35,
+ CVMX_BOARD_TYPE_EBT5810 = 36,
+ CVMX_BOARD_TYPE_NIC10E = 37,
CVMX_BOARD_TYPE_MAX,
/* The range from CVMX_BOARD_TYPE_MAX to CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved
@@ -189,6 +201,13 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER= 10016,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX= 10019,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX= 10020,
+ CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
/* Set aside a range for customer private use. The SDK won't
@@ -196,6 +215,20 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+ /* Range for IO modules */
+ CVMX_BOARD_TYPE_MODULE_MIN = 30001,
+ CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X = 30002,
+ CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X = 30003,
+ CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL = 30004,
+ CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM = 30005,
+ CVMX_BOARD_TYPE_MODULE_SRIO = 30006,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0 = 30007,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1 = 30008,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2 = 30009,
+ CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3 = 30010,
+ CVMX_BOARD_TYPE_MODULE_MAX = 31000,
+
/* The remaining range is reserved for future use. */
};
enum cvmx_chip_types_enum {
@@ -243,6 +276,15 @@ static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum t
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
/* Customer boards listed here */
@@ -261,11 +303,32 @@ static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum t
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL)
+
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
/* Customer private range */
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+
+ /* Module range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SRIO)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MAX)
}
return "Unsupported Board";
}
diff --git a/cvmx-asm.h b/cvmx-asm.h
index 2406677efca5..24c94a6bc9d0 100644
--- a/cvmx-asm.h
+++ b/cvmx-asm.h
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,18 +42,77 @@
+
/**
* @file
*
* This is file defines ASM primitives for the executive.
- * <hr>$Revision: 42280 $<hr>
+ * <hr>$Revision: 52004 $<hr>
*
*
*/
#ifndef __CVMX_ASM_H__
#define __CVMX_ASM_H__
+#define COP0_INDEX $0,0 /* TLB read/write index */
+#define COP0_RANDOM $1,0 /* TLB random index */
+#define COP0_ENTRYLO0 $2,0 /* TLB entryLo0 */
+#define COP0_ENTRYLO1 $3,0 /* TLB entryLo1 */
+#define COP0_CONTEXT $4,0 /* Context */
+#define COP0_PAGEMASK $5,0 /* TLB pagemask */
+#define COP0_PAGEGRAIN $5,1 /* TLB config for max page sizes */
+#define COP0_WIRED $6,0 /* TLB number of wired entries */
+#define COP0_HWRENA $7,0 /* rdhw instruction enable per register */
+#define COP0_BADVADDR $8,0 /* Bad virtual address */
+#define COP0_COUNT $9,0 /* Mips count register */
+#define COP0_CVMCOUNT $9,6 /* Cavium count register */
+#define COP0_CVMCTL $9,7 /* Cavium control */
+#define COP0_ENTRYHI $10,0 /* TLB entryHi */
+#define COP0_COMPARE $11,0 /* Mips compare register */
+#define COP0_POWTHROTTLE $11,6 /* Power throttle register */
+#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
+#define COP0_STATUS $12,0 /* Mips status register */
+#define COP0_INTCTL $12,1 /* Useless (Vectored interrupts) */
+#define COP0_SRSCTL $12,2 /* Useless (Shadow registers) */
+#define COP0_CAUSE $13,0 /* Mips cause register */
+#define COP0_EPC $14,0 /* Exception program counter */
+#define COP0_PRID $15,0 /* Processor ID */
+#define COP0_EBASE $15,1 /* Exception base */
+#define COP0_CONFIG $16,0 /* Misc config options */
+#define COP0_CONFIG1 $16,1 /* Misc config options */
+#define COP0_CONFIG2 $16,2 /* Misc config options */
+#define COP0_CONFIG3 $16,3 /* Misc config options */
+#define COP0_WATCHLO0 $18,0 /* Address watch registers */
+#define COP0_WATCHLO1 $18,1 /* Address watch registers */
+#define COP0_WATCHHI0 $19,0 /* Address watch registers */
+#define COP0_WATCHHI1 $19,1 /* Address watch registers */
+#define COP0_XCONTEXT $20,0 /* OS context */
+#define COP0_MULTICOREDEBUG $22,0 /* Cavium debug */
+#define COP0_DEBUG $23,0 /* Debug status */
+#define COP0_DEPC $24,0 /* Debug PC */
+#define COP0_PERFCONTROL0 $25,0 /* Performance counter control */
+#define COP0_PERFCONTROL1 $25,2 /* Performance counter control */
+#define COP0_PERFVALUE0 $25,1 /* Performance counter */
+#define COP0_PERFVALUE1 $25,3 /* Performance counter */
+#define COP0_CACHEERRI $27,0 /* I cache error status */
+#define COP0_CACHEERRD $27,1 /* D cache error status */
+#define COP0_TAGLOI $28,0 /* I cache tagLo */
+#define COP0_TAGLOD $28,2 /* D cache tagLo */
+#define COP0_DATALOI $28,1 /* I cache dataLo */
+#define COP0_DATALOD $28,3 /* D cahce dataLo */
+#define COP0_TAGHI $29,2 /* ? */
+#define COP0_DATAHII $29,1 /* ? */
+#define COP0_DATAHID $29,3 /* ? */
+#define COP0_ERROREPC $30,0 /* Error PC */
+#define COP0_DESAVE $31,0 /* Debug scratch area */
+
+/* This header file can be included from a .S file. Keep non-preprocessor
+ things under !__ASSEMBLER__. */
+#ifndef __ASSEMBLER__
+
+#include "octeon-model.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -61,11 +121,23 @@ extern "C" {
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
+#if !OCTEON_IS_COMMON_BINARY()
+ #if CVMX_COMPILED_FOR(OCTEON_CN63XX)
+ #define CVMX_CAVIUM_OCTEON2
+ #endif
+#endif
+
/* other useful stuff */
#define CVMX_BREAK asm volatile ("break")
#define CVMX_SYNC asm volatile ("sync" : : :"memory")
/* String version of SYNCW macro for using in inline asm constructs */
-#define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#define CVMX_SYNCW_STR_OCTEON2 "syncw\n"
+#ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCW_STR CVMX_SYNCW_STR_OCTEON2
+#else
+ #define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#endif /* CVMX_CAVIUM_OCTEON2 */
+
#ifdef __OCTEON__
#define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
@@ -75,13 +147,20 @@ extern "C" {
errata Core-401. This can cause a single syncw to not enforce
ordering under very rare conditions. Even if it is rare, better safe
than sorry */
- #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
+ #define CVMX_SYNCW_OCTEON2 asm volatile ("syncw\n" : : :"memory")
+ #ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCW CVMX_SYNCW_OCTEON2
+ #else
+ #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
+ #endif /* CVMX_CAVIUM_OCTEON2 */
#if defined(VXWORKS) || defined(__linux__)
- /* Define new sync instructions to be normal SYNC instructions for
- operating systems that use threads */
- #define CVMX_SYNCWS CVMX_SYNCW
- #define CVMX_SYNCS CVMX_SYNC
- #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ /* Define new sync instructions to be normal SYNC instructions for
+ operating systems that use threads */
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
#else
#if defined(CVMX_BUILD_FOR_TOOLCHAIN)
/* While building simple exec toolchain, always use syncw to
@@ -89,15 +168,24 @@ extern "C" {
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
#else
/* Again, just like syncw, we may need two syncws instructions in a row due
- errata Core-401 */
- #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
+ errata Core-401. Only one syncws is required for Octeon2 models */
#define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
- #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
+ #define CVMX_SYNCWS_OCTEON2 asm volatile ("syncws\n" : : :"memory")
+ #define CVMX_SYNCWS_STR_OCTEON2 "syncws\n"
+ #ifdef CVMX_CAVIUM_OCTEON2
+ #define CVMX_SYNCWS CVMX_SYNCWS_OCTEON2
+ #define CVMX_SYNCWS_STR CVMX_SYNCWS_STR_OCTEON2
+ #else
+ #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
+ #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
+ #endif /* CVMX_CAVIUM_OCTEON2 */
#endif
#endif
-#else
+#else /* !__OCTEON__ */
/* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
#define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
#define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
@@ -106,6 +194,8 @@ extern "C" {
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW
+ #define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR
#endif
#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
@@ -137,6 +227,12 @@ extern "C" {
#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
+#define CVMX_CACHE(op, address, offset) asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset) // fetch and lock the state.
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset) // unlock the state.
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset) // invalidate the cache block and clear the USED bits for the block
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset) // load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register
+
/* new instruction to make RC4 run faster */
#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
@@ -232,7 +328,7 @@ extern "C" {
ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
result = (typeof(result))_v; \
}})
-
+
# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
@@ -408,18 +504,67 @@ extern "C" {
#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
-/* GFM COP2 macros */
-/* index can be 0 or 1 */
-#define CVMX_MF_GFM_MUL(val, index) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
-#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
-#define CVMX_MF_GFM_RESINP(val, index) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
-#define CVMX_MT_GFM_MUL(val, index) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(index) : : [rt] "d" (val))
-#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
-#define CVMX_MT_GFM_RESINP(val, index) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(index) : : [rt] "d" (val))
-#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
-#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
+// GFM
+
+// pos can be 0-1
+#define CVMX_MF_GFM_MUL(val,pos) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_GFM_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_GFM_RESINP_REFLECT(val,pos) asm volatile ("dmfc2 %[rt],0x005a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+
+// pos can be 0-1
+#define CVMX_MT_GFM_MUL(val,pos) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_GFM_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
+#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_GFM_MUL_REFLECT(val,pos) asm volatile ("dmtc2 %[rt],0x0058+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MT_GFM_XOR0_REFLECT(val) asm volatile ("dmtc2 %[rt],0x005c" : : [rt] "d" (val))
+#define CVMX_MT_GFM_XORMUL1_REFLECT(val) asm volatile ("dmtc2 %[rt],0x405d" : : [rt] "d" (val))
+
+// SNOW 3G
+
+// pos can be 0-7
+#define CVMX_MF_SNOW3G_LFSR(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-2
+#define CVMX_MF_SNOW3G_FSM(val,pos) asm volatile ("dmfc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_SNOW3G_RESULT(val) asm volatile ("dmfc2 %[rt],0x0250" : [rt] "=d" (val) : )
+// pos can be 0-7
+#define CVMX_MT_SNOW3G_LFSR(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-2
+#define CVMX_MT_SNOW3G_FSM(val,pos) asm volatile ("dmtc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_RESULT(val) asm volatile ("dmtc2 %[rt],0x0250" : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_START(val) asm volatile ("dmtc2 %[rt],0x404d" : : [rt] "d" (val))
+#define CVMX_MT_SNOW3G_MORE(val) asm volatile ("dmtc2 %[rt],0x404e" : : [rt] "d" (val))
+
+// SMS4
+
+// pos can be 0-1
+#define CVMX_MF_SMS4_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_SMS4_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_SMS4_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x0100+"CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MT_SMS4_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x311d" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_DEC1(val) asm volatile ("dmtc2 %[rt],0x311f" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3119" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
+#define CVMX_MT_SMS4_ENC1(val) asm volatile ("dmtc2 %[rt],0x311b" : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_SMS4_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x0100+"CVMX_TMP_STR(pos) : : [rt] "d" (val))
/* check_ordering stuff */
#if 0
@@ -436,19 +581,25 @@ extern "C" {
#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
-#define CVMX_MF_CACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,0" : [rt] "=d" (val):)
-#define CVMX_MF_DCACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,1" : [rt] "=d" (val):)
-#define CVMX_MF_CVM_MEM_CTL(val) asm volatile ("dmfc0 %[rt],$11,7" : [rt] "=d" (val):)
-#define CVMX_MF_CVM_CTL(val) asm volatile ("dmfc0 %[rt],$9,7" : [rt] "=d" (val):)
-#define CVMX_MT_CACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,0" : : [rt] "d" (val))
-#define CVMX_MT_DCACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,1" : : [rt] "d" (val))
-#define CVMX_MT_CVM_MEM_CTL(val) asm volatile ("dmtc0 %[rt],$11,7" : : [rt] "d" (val))
-#define CVMX_MT_CVM_CTL(val) asm volatile ("dmtc0 %[rt],$9,7" : : [rt] "d" (val))
+#define VASTR(...) #__VA_ARGS__
+
+#define CVMX_MF_COP0(val, cop0) asm volatile ("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val));
+#define CVMX_MT_COP0(val, cop0) asm volatile ("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val));
+
+#define CVMX_MF_CACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRI)
+#define CVMX_MF_DCACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRD)
+#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MF_CVM_CTL(val) CVMX_MF_COP0(val, COP0_CVMCTL)
+#define CVMX_MT_CACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRI)
+#define CVMX_MT_DCACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRD)
+#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MT_CVM_CTL(val) CVMX_MT_COP0(val, COP0_CVMCTL)
/* Macros for TLB */
#define CVMX_TLBWI asm volatile ("tlbwi" : : )
#define CVMX_TLBWR asm volatile ("tlbwr" : : )
#define CVMX_TLBR asm volatile ("tlbr" : : )
+#define CVMX_TLBP asm volatile ("tlbp" : : )
#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
@@ -464,12 +615,16 @@ extern "C" {
#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_INDEX(val) asm volatile ("mfc0 %[rt],$0,0" : [rt] "=d" (val):)
#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
#define TLB_DIRTY (0x1ULL<<2)
#define TLB_VALID (0x1ULL<<1)
#define TLB_GLOBAL (0x1ULL<<0)
+/* Macros to PUSH and POP Octeon2 ISA. */
+#define CVMX_PUSH_OCTEON2 asm volatile (".set push\n.set arch=octeon2")
+#define CVMX_POP_OCTEON2 asm volatile (".set pop")
/* assembler macros to guarantee byte loads/stores are used */
/* for an unaligned 16-bit access (these use AT register) */
@@ -510,4 +665,6 @@ extern "C" {
}
#endif
+#endif /* __ASSEMBLER__ */
+
#endif /* __CVMX_ASM_H__ */
diff --git a/cvmx-asx.h b/cvmx-asx.h
deleted file mode 100644
index 4a49a04b4c89..000000000000
--- a/cvmx-asx.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
- *
- ***********************license end**************************************/
-
-
-
-
-
-
-/**
- * @file
- *
- * Interface to the ASX hardware.
- *
- * <hr>$Revision: 41586 $<hr>
- */
-
-#ifndef __CVMX_ASX_H__
-#define __CVMX_ASX_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
diff --git a/cvmx-asx0-defs.h b/cvmx-asx0-defs.h
new file mode 100644
index 000000000000..42115db5980b
--- /dev/null
+++ b/cvmx-asx0-defs.h
@@ -0,0 +1,147 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-asx0-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon asx0.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ASX0_TYPEDEFS_H__
+#define __CVMX_ASX0_TYPEDEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ASX0_DBG_DATA_DRV CVMX_ASX0_DBG_DATA_DRV_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_DRV_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_DRV not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800B0000208ull);
+}
+#else
+#define CVMX_ASX0_DBG_DATA_DRV (CVMX_ADD_IO_SEG(0x00011800B0000208ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_ASX0_DBG_DATA_ENABLE CVMX_ASX0_DBG_DATA_ENABLE_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_ENABLE_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_ENABLE not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00011800B0000200ull);
+}
+#else
+#define CVMX_ASX0_DBG_DATA_ENABLE (CVMX_ADD_IO_SEG(0x00011800B0000200ull))
+#endif
+
+/**
+ * cvmx_asx0_dbg_data_drv
+ *
+ * ASX_DBG_DATA_DRV
+ *
+ */
+union cvmx_asx0_dbg_data_drv
+{
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_drv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_drv_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
+ interface. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asx0_dbg_data_drv_cn38xx cn38xxp2;
+ struct cvmx_asx0_dbg_data_drv_s cn58xx;
+ struct cvmx_asx0_dbg_data_drv_s cn58xxp1;
+};
+typedef union cvmx_asx0_dbg_data_drv cvmx_asx0_dbg_data_drv_t;
+
+/**
+ * cvmx_asx0_dbg_data_enable
+ *
+ * ASX_DBG_DATA_ENABLE
+ *
+ */
+union cvmx_asx0_dbg_data_enable
+{
+ uint64_t u64;
+ struct cvmx_asx0_dbg_data_enable_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< A 1->0 transistion, turns the dbg interface OFF. */
+#else
+ uint64_t en : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asx0_dbg_data_enable_s cn38xx;
+ struct cvmx_asx0_dbg_data_enable_s cn38xxp2;
+ struct cvmx_asx0_dbg_data_enable_s cn58xx;
+ struct cvmx_asx0_dbg_data_enable_s cn58xxp1;
+};
+typedef union cvmx_asx0_dbg_data_enable cvmx_asx0_dbg_data_enable_t;
+
+#endif
diff --git a/cvmx-asxx-defs.h b/cvmx-asxx-defs.h
new file mode 100644
index 000000000000..0791d1b1d4da
--- /dev/null
+++ b/cvmx-asxx-defs.h
@@ -0,0 +1,1382 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-asxx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon asxx.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_ASXX_TYPEDEFS_H__
+#define __CVMX_ASXX_TYPEDEFS_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_GMII_RX_CLK_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_CLK_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000180ull);
+}
+#else
+#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000180ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_GMII_RX_DAT_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000188ull);
+}
+#else
+#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000188ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_INT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_INT_REG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_REG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_MII_RX_DAT_SET(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_MII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000190ull);
+}
+#else
+#define CVMX_ASXX_MII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000190ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_PRT_LOOP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_PRT_LOOP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_PRT_LOOP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_BYPASS(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_BYPASS(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_BYPASS_SETTING(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS_SETTING(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_COMP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_COMP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_COMP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_DATA_DRV(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_DATA_DRV(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_DATA_DRV(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_FCRAM_MODE(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_FCRAM_MODE(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_NCTL_STRONG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_NCTL_WEAK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_PCTL_STRONG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_PCTL_WEAK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RLD_SETTING(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_SETTING(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RLD_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_RX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_PRT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_MSK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_MSK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_POWOK(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_POWOK(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_POWOK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_RX_WOL_SIG(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_SIG(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_RX_WOL_SIG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_COMP_BYP(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_COMP_BYP(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_TX_COMP_BYP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_HI_WATERX(unsigned long offset, unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_HI_WATERX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
+}
+#else
+#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_ASXX_TX_PRT_EN(unsigned long block_id)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+ return CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull;
+}
+#else
+#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+#endif
+
+/**
+ * cvmx_asx#_gmii_rx_clk_set
+ *
+ * ASX_GMII_RX_CLK_SET = GMII Clock delay setting
+ *
+ */
+union cvmx_asxx_gmii_rx_clk_set
+{
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_clk_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXCLK (GMII receive clk)
+ delay line. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
+};
+typedef union cvmx_asxx_gmii_rx_clk_set cvmx_asxx_gmii_rx_clk_set_t;
+
+/**
+ * cvmx_asx#_gmii_rx_dat_set
+ *
+ * ASX_GMII_RX_DAT_SET = GMII Clock delay setting
+ *
+ */
+union cvmx_asxx_gmii_rx_dat_set
+{
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_dat_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (GMII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
+};
+typedef union cvmx_asxx_gmii_rx_dat_set cvmx_asxx_gmii_rx_dat_set_t;
+
+/**
+ * cvmx_asx#_int_en
+ *
+ * ASX_INT_EN = Interrupt Enable
+ *
+ */
+union cvmx_asxx_int_en
+{
+ uint64_t u64;
+ struct cvmx_asxx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t txpsh : 4; /**< TX FIFO overflow on RMGII port */
+ uint64_t txpop : 4; /**< TX FIFO underflow on RMGII port */
+ uint64_t ovrflw : 4; /**< RX FIFO overflow on RMGII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t txpsh : 3; /**< TX FIFO overflow on RMGII port */
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpop : 3; /**< TX FIFO underflow on RMGII port */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ovrflw : 3; /**< RX FIFO overflow on RMGII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_en_cn30xx cn31xx;
+ struct cvmx_asxx_int_en_s cn38xx;
+ struct cvmx_asxx_int_en_s cn38xxp2;
+ struct cvmx_asxx_int_en_cn30xx cn50xx;
+ struct cvmx_asxx_int_en_s cn58xx;
+ struct cvmx_asxx_int_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_int_en cvmx_asxx_int_en_t;
+
+/**
+ * cvmx_asx#_int_reg
+ *
+ * ASX_INT_REG = Interrupt Register
+ *
+ */
+union cvmx_asxx_int_reg
+{
+ uint64_t u64;
+ struct cvmx_asxx_int_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t txpsh : 4; /**< TX FIFO overflow on RMGII port */
+ uint64_t txpop : 4; /**< TX FIFO underflow on RMGII port */
+ uint64_t ovrflw : 4; /**< RX FIFO overflow on RMGII port */
+#else
+ uint64_t ovrflw : 4;
+ uint64_t txpop : 4;
+ uint64_t txpsh : 4;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } s;
+ struct cvmx_asxx_int_reg_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_11_63 : 53;
+ uint64_t txpsh : 3; /**< TX FIFO overflow on RMGII port */
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpop : 3; /**< TX FIFO underflow on RMGII port */
+ uint64_t reserved_3_3 : 1;
+ uint64_t ovrflw : 3; /**< RX FIFO overflow on RMGII port */
+#else
+ uint64_t ovrflw : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t txpop : 3;
+ uint64_t reserved_7_7 : 1;
+ uint64_t txpsh : 3;
+ uint64_t reserved_11_63 : 53;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_int_reg_cn30xx cn31xx;
+ struct cvmx_asxx_int_reg_s cn38xx;
+ struct cvmx_asxx_int_reg_s cn38xxp2;
+ struct cvmx_asxx_int_reg_cn30xx cn50xx;
+ struct cvmx_asxx_int_reg_s cn58xx;
+ struct cvmx_asxx_int_reg_s cn58xxp1;
+};
+typedef union cvmx_asxx_int_reg cvmx_asxx_int_reg_t;
+
+/**
+ * cvmx_asx#_mii_rx_dat_set
+ *
+ * ASX_MII_RX_DAT_SET = GMII Clock delay setting
+ *
+ */
+union cvmx_asxx_mii_rx_dat_set
+{
+ uint64_t u64;
+ struct cvmx_asxx_mii_rx_dat_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the RXD (MII receive data)
+ delay lines. The intrinsic delay can range from
+ 50ps to 80ps per tap. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
+};
+typedef union cvmx_asxx_mii_rx_dat_set cvmx_asxx_mii_rx_dat_set_t;
+
+/**
+ * cvmx_asx#_prt_loop
+ *
+ * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins)
+ *
+ */
+union cvmx_asxx_prt_loop
+{
+ uint64_t u64;
+ struct cvmx_asxx_prt_loop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t ext_loop : 4; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RMGII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t int_loop : 4; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RMGII pins)
+ 1 = TX FIFO drives the RX FIFO
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 4;
+ uint64_t ext_loop : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_prt_loop_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_7_63 : 57;
+ uint64_t ext_loop : 3; /**< External Loopback Enable
+ 0 = No Loopback (TX FIFO is filled by RMGII)
+ 1 = RX FIFO drives the TX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - core clock > 250MHZ
+ - rxc must not deviate from the +-50ppm
+ - if txc>rxc, idle cycle may drop over time */
+ uint64_t reserved_3_3 : 1;
+ uint64_t int_loop : 3; /**< Internal Loopback Enable
+ 0 = No Loopback (RX FIFO is filled by RMGII pins)
+ 1 = TX FIFO drives the RX FIFO
+ - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
+ - GMX_PRT_CFG[SPEED] must be 1 (GigE speed)
+ - GMX_TX_CLK[CLK_CNT] must be 1
+ Note, in internal loop-back mode, the RGMII link
+ status is not used (since there is no real PHY).
+ Software cannot use the inband status. */
+#else
+ uint64_t int_loop : 3;
+ uint64_t reserved_3_3 : 1;
+ uint64_t ext_loop : 3;
+ uint64_t reserved_7_63 : 57;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_prt_loop_cn30xx cn31xx;
+ struct cvmx_asxx_prt_loop_s cn38xx;
+ struct cvmx_asxx_prt_loop_s cn38xxp2;
+ struct cvmx_asxx_prt_loop_cn30xx cn50xx;
+ struct cvmx_asxx_prt_loop_s cn58xx;
+ struct cvmx_asxx_prt_loop_s cn58xxp1;
+};
+typedef union cvmx_asxx_prt_loop cvmx_asxx_prt_loop_t;
+
+/**
+ * cvmx_asx#_rld_bypass
+ *
+ * ASX_RLD_BYPASS
+ *
+ */
+union cvmx_asxx_rld_bypass
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t bypass : 1; /**< When set, the rld_dll setting is bypassed with
+ ASX_RLD_BYPASS_SETTING */
+#else
+ uint64_t bypass : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_s cn38xx;
+ struct cvmx_asxx_rld_bypass_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_s cn58xx;
+ struct cvmx_asxx_rld_bypass_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_bypass cvmx_asxx_rld_bypass_t;
+
+/**
+ * cvmx_asx#_rld_bypass_setting
+ *
+ * ASX_RLD_BYPASS_SETTING
+ *
+ */
+union cvmx_asxx_rld_bypass_setting
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_setting_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< The rld_dll setting bypass value */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_bypass_setting cvmx_asxx_rld_bypass_setting_t;
+
+/**
+ * cvmx_asx#_rld_comp
+ *
+ * ASX_RLD_COMP
+ *
+ */
+union cvmx_asxx_rld_comp
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_comp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t pctl : 5; /**< PCTL Compensation Value
+ These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 5;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+ struct cvmx_asxx_rld_comp_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+ uint64_t nctl : 4; /**< These bits reflect the computed compensation
+ values from the built-in compensation circuit. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_comp_s cn58xx;
+ struct cvmx_asxx_rld_comp_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_comp cvmx_asxx_rld_comp_t;
+
+/**
+ * cvmx_asx#_rld_data_drv
+ *
+ * ASX_RLD_DATA_DRV
+ *
+ */
+union cvmx_asxx_rld_data_drv
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_data_drv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+ uint64_t nctl : 4; /**< These bits specify a driving strength (positive
+ integer) for the RLD I/Os when the built-in
+ compensation circuit is bypassed. */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } s;
+ struct cvmx_asxx_rld_data_drv_s cn38xx;
+ struct cvmx_asxx_rld_data_drv_s cn38xxp2;
+ struct cvmx_asxx_rld_data_drv_s cn58xx;
+ struct cvmx_asxx_rld_data_drv_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_data_drv cvmx_asxx_rld_data_drv_t;
+
+/**
+ * cvmx_asx#_rld_fcram_mode
+ *
+ * ASX_RLD_FCRAM_MODE
+ *
+ */
+union cvmx_asxx_rld_fcram_mode
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_fcram_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t mode : 1; /**< Memory Mode
+ - 0: RLDRAM
+ - 1: FCRAM */
+#else
+ uint64_t mode : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xx;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
+};
+typedef union cvmx_asxx_rld_fcram_mode cvmx_asxx_rld_fcram_mode_t;
+
+/**
+ * cvmx_asx#_rld_nctl_strong
+ *
+ * ASX_RLD_NCTL_STRONG
+ *
+ */
+union cvmx_asxx_rld_nctl_strong
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_strong_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< Duke's drive control */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_nctl_strong cvmx_asxx_rld_nctl_strong_t;
+
+/**
+ * cvmx_asx#_rld_nctl_weak
+ *
+ * ASX_RLD_NCTL_WEAK
+ *
+ */
+union cvmx_asxx_rld_nctl_weak
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_weak_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t nctl : 5; /**< UNUSED (not needed for CN58XX) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_nctl_weak cvmx_asxx_rld_nctl_weak_t;
+
+/**
+ * cvmx_asx#_rld_pctl_strong
+ *
+ * ASX_RLD_PCTL_STRONG
+ *
+ */
+union cvmx_asxx_rld_pctl_strong
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_strong_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< Duke's drive control */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_pctl_strong cvmx_asxx_rld_pctl_strong_t;
+
+/**
+ * cvmx_asx#_rld_pctl_weak
+ *
+ * ASX_RLD_PCTL_WEAK
+ *
+ */
+union cvmx_asxx_rld_pctl_weak
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_weak_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t pctl : 5; /**< UNUSED (not needed for CN58XX) */
+#else
+ uint64_t pctl : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_pctl_weak cvmx_asxx_rld_pctl_weak_t;
+
+/**
+ * cvmx_asx#_rld_setting
+ *
+ * ASX_RLD_SETTING
+ *
+ */
+union cvmx_asxx_rld_setting
+{
+ uint64_t u64;
+ struct cvmx_asxx_rld_setting_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_13_63 : 51;
+ uint64_t dfaset : 5; /**< RLD ClkGen DLL Setting(debug) */
+ uint64_t dfalag : 1; /**< RLD ClkGen DLL Lag Error(debug) */
+ uint64_t dfalead : 1; /**< RLD ClkGen DLL Lead Error(debug) */
+ uint64_t dfalock : 1; /**< RLD ClkGen DLL Lock acquisition(debug) */
+ uint64_t setting : 5; /**< RLDCK90 DLL Setting(debug) */
+#else
+ uint64_t setting : 5;
+ uint64_t dfalock : 1;
+ uint64_t dfalead : 1;
+ uint64_t dfalag : 1;
+ uint64_t dfaset : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } s;
+ struct cvmx_asxx_rld_setting_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< This is the read-only true rld dll_setting. */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_setting_s cn58xx;
+ struct cvmx_asxx_rld_setting_s cn58xxp1;
+};
+typedef union cvmx_asxx_rld_setting cvmx_asxx_rld_setting_t;
+
+/**
+ * cvmx_asx#_rx_clk_set#
+ *
+ * ASX_RX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop RXC (RGMII receive clk)
+ * delay line, which can delay the recieved clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the recieved
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+union cvmx_asxx_rx_clk_setx
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_clk_setx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop RXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_rx_clk_setx_s cn30xx;
+ struct cvmx_asxx_rx_clk_setx_s cn31xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_rx_clk_setx_s cn50xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
+};
+typedef union cvmx_asxx_rx_clk_setx cvmx_asxx_rx_clk_setx_t;
+
+/**
+ * cvmx_asx#_rx_prt_en
+ *
+ * ASX_RX_PRT_EN = RGMII Port Enable
+ *
+ */
+union cvmx_asxx_rx_prt_en
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_prt_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to receive
+ RMGII traffic. When this bit clear on a given
+ port, then the all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_rx_prt_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to receive
+ RMGII traffic. When this bit clear on a given
+ port, then the all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_rx_prt_en cvmx_asxx_rx_prt_en_t;
+
+/**
+ * cvmx_asx#_rx_wol
+ *
+ * ASX_RX_WOL = RGMII RX Wake on LAN status register
+ *
+ */
+union cvmx_asxx_rx_wol
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t status : 1; /**< Copy of PMCSR[15] - PME_status */
+ uint64_t enable : 1; /**< Copy of PMCSR[8] - PME_enable */
+#else
+ uint64_t enable : 1;
+ uint64_t status : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_s cn38xx;
+ struct cvmx_asxx_rx_wol_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol cvmx_asxx_rx_wol_t;
+
+/**
+ * cvmx_asx#_rx_wol_msk
+ *
+ * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask
+ *
+ */
+union cvmx_asxx_rx_wol_msk
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_msk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t msk : 64; /**< Bytes to include in the CRC signature */
+#else
+ uint64_t msk : 64;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_msk_s cn38xx;
+ struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_msk cvmx_asxx_rx_wol_msk_t;
+
+/**
+ * cvmx_asx#_rx_wol_powok
+ *
+ * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK
+ *
+ */
+union cvmx_asxx_rx_wol_powok
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_powok_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t powerok : 1; /**< Power OK */
+#else
+ uint64_t powerok : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_powok_s cn38xx;
+ struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_powok cvmx_asxx_rx_wol_powok_t;
+
+/**
+ * cvmx_asx#_rx_wol_sig
+ *
+ * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature
+ *
+ */
+union cvmx_asxx_rx_wol_sig
+{
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_sig_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_32_63 : 32;
+ uint64_t sig : 32; /**< CRC signature */
+#else
+ uint64_t sig : 32;
+ uint64_t reserved_32_63 : 32;
+#endif
+ } s;
+ struct cvmx_asxx_rx_wol_sig_s cn38xx;
+ struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
+};
+typedef union cvmx_asxx_rx_wol_sig cvmx_asxx_rx_wol_sig_t;
+
+/**
+ * cvmx_asx#_tx_clk_set#
+ *
+ * ASX_TX_CLK_SET = RGMII Clock delay setting
+ *
+ *
+ * Notes:
+ * Setting to place on the open-loop TXC (RGMII transmit clk)
+ * delay line, which can delay the transmited clock. This
+ * can be used if the board and/or transmitting device
+ * has not otherwise delayed the clock.
+ *
+ * A value of SETTING=0 disables the delay line. The delay
+ * line should be disabled unless the transmitter or board
+ * does not delay the clock.
+ *
+ * Note that this delay line provides only a coarse control
+ * over the delay. Generally, it can only reliably provide
+ * a delay in the range 1.25-2.5ns, which may not be adequate
+ * for some system applications.
+ *
+ * The open loop delay line selects
+ * from among a series of tap positions. Each incremental
+ * tap position adds a delay of 50ps to 135ps per tap, depending
+ * on the chip, its temperature, and the voltage.
+ * To achieve from 1.25-2.5ns of delay on the recieved
+ * clock, a fixed value of SETTING=24 may work.
+ * For more precision, we recommend the following settings
+ * based on the chip voltage:
+ *
+ * VDD SETTING
+ * -----------------------------
+ * 1.0 18
+ * 1.05 19
+ * 1.1 21
+ * 1.15 22
+ * 1.2 23
+ * 1.25 24
+ * 1.3 25
+ */
+union cvmx_asxx_tx_clk_setx
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_clk_setx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t setting : 5; /**< Setting to place on the open-loop TXC delay line */
+#else
+ uint64_t setting : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_asxx_tx_clk_setx_s cn30xx;
+ struct cvmx_asxx_tx_clk_setx_s cn31xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_tx_clk_setx_s cn50xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_clk_setx cvmx_asxx_tx_clk_setx_t;
+
+/**
+ * cvmx_asx#_tx_comp_byp
+ *
+ * ASX_TX_COMP_BYP = RGMII Clock delay setting
+ *
+ */
+union cvmx_asxx_tx_comp_byp
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_comp_byp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_0_63 : 64;
+#else
+ uint64_t reserved_0_63 : 64;
+#endif
+ } s;
+ struct cvmx_asxx_tx_comp_byp_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_9_63 : 55;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t bypass : 1;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_8_63 : 56;
+ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */
+ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 4;
+ uint64_t pctl : 4;
+ uint64_t reserved_8_63 : 56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
+ struct cvmx_asxx_tx_comp_byp_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_17_63 : 47;
+ uint64_t bypass : 1; /**< Compensation bypass */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t bypass : 1;
+ uint64_t reserved_17_63 : 47;
+#endif
+ } cn50xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_13_63 : 51;
+ uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */
+#else
+ uint64_t nctl : 5;
+ uint64_t reserved_5_7 : 3;
+ uint64_t pctl : 5;
+ uint64_t reserved_13_63 : 51;
+#endif
+ } cn58xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
+};
+typedef union cvmx_asxx_tx_comp_byp cvmx_asxx_tx_comp_byp_t;
+
+/**
+ * cvmx_asx#_tx_hi_water#
+ *
+ * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark
+ *
+ */
+union cvmx_asxx_tx_hi_waterx
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_hi_waterx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t mark : 4; /**< TX FIFO HiWatermark to stall GMX
+ Value of 0 maps to 16
+ Reset value changed from 10 in pass1
+ Pass1 settings (assuming 125 tclk)
+ - 325-375: 12
+ - 375-437: 11
+ - 437-550: 10
+ - 550-687: 9 */
+#else
+ uint64_t mark : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t mark : 3; /**< TX FIFO HiWatermark to stall GMX
+ Value 0 maps to 8. */
+#else
+ uint64_t mark : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_hi_waterx cvmx_asxx_tx_hi_waterx_t;
+
+/**
+ * cvmx_asx#_tx_prt_en
+ *
+ * ASX_TX_PRT_EN = RGMII Port Enable
+ *
+ */
+union cvmx_asxx_tx_prt_en
+{
+ uint64_t u64;
+ struct cvmx_asxx_tx_prt_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t prt_en : 4; /**< Port enable. Must be set for Octane to send
+ RMGII traffic. When this bit clear on a given
+ port, then all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_prt_en_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t prt_en : 3; /**< Port enable. Must be set for Octane to send
+ RMGII traffic. When this bit clear on a given
+ port, then all RGMII cycles will appear as
+ inter-frame cycles. */
+#else
+ uint64_t prt_en : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xxp1;
+};
+typedef union cvmx_asxx_tx_prt_en cvmx_asxx_tx_prt_en_t;
+
+#endif
diff --git a/cvmx-atomic.h b/cvmx-atomic.h
index 6446130be32d..ba1fafaa985c 100644
--- a/cvmx-atomic.h
+++ b/cvmx-atomic.h
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
- *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,12 +42,13 @@
+
/**
* @file
*
* This file provides atomic operations
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*
*
*/
@@ -353,18 +355,44 @@ static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t i
{
uint64_t tmp, ret;
- __asm__ __volatile__(
- ".set noreorder \n"
- "1: lld %[tmp], %[val] \n"
- " move %[ret], %[tmp] \n"
- " daddu %[tmp], %[inc] \n"
- " scd %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [inc] "r" (incr)
- : "memory");
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(incr) && incr == 1)
+ {
+ __asm__ __volatile__(
+ "laid %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(incr) && incr == -1)
+ {
+ __asm__ __volatile__(
+ "ladd %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "laad %0,(%2),%3"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " daddu %[tmp], %[inc] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [inc] "r" (incr)
+ : "memory");
+ }
return (ret);
}
@@ -408,18 +436,44 @@ static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t i
{
uint32_t tmp, ret;
- __asm__ __volatile__(
- ".set noreorder \n"
- "1: ll %[tmp], %[val] \n"
- " move %[ret], %[tmp] \n"
- " addu %[tmp], %[inc] \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [inc] "r" (incr)
- : "memory");
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(incr) && incr == 1)
+ {
+ __asm__ __volatile__(
+ "lai %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(incr) && incr == -1)
+ {
+ __asm__ __volatile__(
+ "lad %0,(%2)"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "laa %0,(%2),%3"
+ : "=r" (ret), "+m" (ptr) : "r" (ptr), "r" (incr) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " move %[ret], %[tmp] \n"
+ " addu %[tmp], %[inc] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [inc] "r" (incr)
+ : "memory");
+ }
return (ret);
}
@@ -538,9 +592,8 @@ static inline uint64_t cvmx_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64
" beqz %[tmp], 1b \n"
" nop \n"
".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [msk] "r" (mask)
- : "memory");
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
+ : : "memory");
return (ret);
}
@@ -572,9 +625,8 @@ static inline uint32_t cvmx_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32
" beqz %[tmp], 1b \n"
" nop \n"
".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [msk] "r" (mask)
- : "memory");
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
+ : : "memory");
return (ret);
}
@@ -596,17 +648,43 @@ static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val
{
uint64_t tmp, ret;
- __asm__ __volatile__(
- ".set noreorder \n"
- "1: lld %[ret], %[val] \n"
- " move %[tmp], %[new_val] \n"
- " scd %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [new_val] "r" (new_val)
- : "memory");
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(new_val) && new_val == 0)
+ {
+ __asm__ __volatile__(
+ "lacd %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(new_val) && new_val == ~0ull)
+ {
+ __asm__ __volatile__(
+ "lasd %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "lawd %0,(%1),%2"
+ : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: lld %[ret], %[val] \n"
+ " move %[tmp], %[new_val] \n"
+ " scd %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [new_val] "r" (new_val)
+ : "memory");
+ }
return (ret);
}
@@ -628,17 +706,43 @@ static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val
{
uint32_t tmp, ret;
- __asm__ __volatile__(
- ".set noreorder \n"
- "1: ll %[ret], %[val] \n"
- " move %[tmp], %[new_val] \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n"
- : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
- : [new_val] "r" (new_val)
- : "memory");
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ CVMX_PUSH_OCTEON2;
+ if (__builtin_constant_p(new_val) && new_val == 0)
+ {
+ __asm__ __volatile__(
+ "lac %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else if (__builtin_constant_p(new_val) && new_val == ~0u)
+ {
+ __asm__ __volatile__(
+ "las %0,(%1)"
+ : "=r" (ret) : "r" (ptr) : "memory");
+ }
+ else
+ {
+ __asm__ __volatile__(
+ "law %0,(%1),%2"
+ : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
+ }
+ CVMX_POP_OCTEON2;
+ }
+ else
+ {
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[ret], %[val] \n"
+ " move %[tmp], %[new_val] \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
+ : [new_val] "r" (new_val)
+ : "memory");
+ }
return (ret);
}
diff --git a/cvmx-bootloader.h b/cvmx-bootloader.h
index c1097aa33991..caf460974db5 100644
--- a/cvmx-bootloader.h
+++ b/cvmx-bootloader.h
@@ -1,43 +1,45 @@
/***********************license start***************
- * Copyright (c) 2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
#ifndef __CVMX_BOOTLOADER__
#define __CVMX_BOOTLOADER__
@@ -48,7 +50,7 @@
*
* Bootloader definitions that are shared with other programs
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*/
@@ -65,17 +67,18 @@
#define BOOTLOADER_HEADER_MAX_SIZE 0x200 /* limited by the space to the next exception handler */
#define BOOTLOADER_HEADER_CURRENT_MAJOR_REV 1
-#define BOOTLOADER_HEADER_CURRENT_MINOR_REV 1
+#define BOOTLOADER_HEADER_CURRENT_MINOR_REV 2
+/* Revision history
+* 1.1 Initial released revision. (SDK 1.9)
+* 1.2 TLB based relocatable image (SDK 2.0)
+*
+*
+*/
/* offsets to struct bootloader_header fields for assembly use */
-#define MAGIC_OFFST 8
-#define HCRC_OFFST 12
-#define HLEN_OFFST 16
-#define DLEN_OFFST 24
-#define DCRC_OFFST 28
-#define GOT_OFFST 48
+#define GOT_ADDRESS_OFFSET 48
-#define LOOKUP_STEP 8192
+#define LOOKUP_STEP (64*1024)
#ifndef __ASSEMBLY__
typedef struct bootloader_header
@@ -86,7 +89,7 @@ typedef struct bootloader_header
*/
uint32_t nop_instr; /* Must be 0x0 */
uint32_t magic; /* Magic number to identify header */
- uint32_t hcrc; /* CRC of all of header excluding this field */
+ uint32_t hcrc; /* CRC of all of header excluding this field */
uint16_t hlen; /* Length of header in bytes */
uint16_t maj_rev; /* Major revision */
@@ -99,12 +102,11 @@ typedef struct bootloader_header
uint32_t flags;
uint16_t image_type; /* Defined in bootloader_image_t enum */
uint16_t resv0; /* pad */
-
- /* The next 4 fields are placed in compile-time, not by the utility */
- uint32_t got_address; /* compiled got address position in the image */
- uint32_t got_num_entries; /* number of got entries */
- uint32_t compiled_start; /* compaled start of the image address */
- uint32_t image_start; /* relocated start of image address */
+
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
char comment_string[BOOTLOADER_HEADER_COMMENT_LEN]; /* Optional, for descriptive purposes */
char version_string[BOOTLOADER_HEADER_VERSION_LEN]; /* Optional, for descriptive purposes */
@@ -118,7 +120,7 @@ typedef struct bootloader_header
typedef enum
{
- BL_HEADER_IMAGE_UKNOWN = 0x0,
+ BL_HEADER_IMAGE_UNKNOWN = 0x0,
BL_HEADER_IMAGE_STAGE2, /* Binary bootloader stage2 image (NAND boot) */
BL_HEADER_IMAGE_STAGE3, /* Binary bootloader stage3 image (NAND boot)*/
BL_HEADER_IMAGE_NOR, /* Binary bootloader for NOR boot */
@@ -136,6 +138,8 @@ typedef enum
** by stage1 and stage2. */
#define MAX_NAND_SEARCH_ADDR 0x400000
+/* Maximum address to look for start of normal bootloader */
+#define MAX_NOR_SEARCH_ADDR 0x100000
/* Defines for RAM based environment set by the host or the previous bootloader
** in a chain boot configuration. */
diff --git a/cvmx-bootmem.c b/cvmx-bootmem.c
index 55aea601b70d..bb373fbaa0ea 100644
--- a/cvmx-bootmem.c
+++ b/cvmx-bootmem.c
@@ -1,62 +1,70 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Simple allocate only memory allocator. Used to allocate memory at application
* start time.
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 52119 $<hr>
*
*/
-
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <linux/module.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#else
+#include "executive-config.h"
#include "cvmx.h"
-#include "cvmx-spinlock.h"
#include "cvmx-bootmem.h"
+#endif
+typedef uint32_t cvmx_spinlock_t;
//#define DEBUG
-
+#define ULL unsigned long long
#undef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -65,7 +73,192 @@
#define ALIGN_ADDR_UP(addr, align) (((addr) + (~(align))) & (align))
-static CVMX_SHARED cvmx_bootmem_desc_t *cvmx_bootmem_desc = NULL;
+/**
+ * This is the physical location of a cvmx_bootmem_desc_t
+ * structure in Octeon's memory. Note that dues to addressing
+ * limits or runtime environment it might not be possible to
+ * create a C pointer to this structure.
+ */
+static CVMX_SHARED uint64_t cvmx_bootmem_desc_addr = 0;
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s*)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc_t to read.
+ * Regardless of the type of the field, the return type is always
+ * a uint64_t.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(field) \
+ __cvmx_bootmem_desc_get(cvmx_bootmem_desc_addr, \
+ offsetof(cvmx_bootmem_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_desc_t, field))
+
+/**
+ * This macro writes a member of the cvmx_bootmem_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc_t to write.
+ */
+#define CVMX_BOOTMEM_DESC_SET_FIELD(field, value) \
+ __cvmx_bootmem_desc_set(cvmx_bootmem_desc_addr, \
+ offsetof(cvmx_bootmem_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_desc_t, field), value)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc_t structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to read. Regardless of the type
+ * of the field, the return type is always a uint64_t. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
+ __cvmx_bootmem_desc_get(addr, \
+ offsetof(cvmx_bootmem_named_block_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field))
+
+/**
+ * This macro writes a member of the cvmx_bootmem_named_block_desc_t
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to write. The "addr" parameter
+ * is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_SET_FIELD(addr, field, value) \
+ __cvmx_bootmem_desc_set(addr, \
+ offsetof(cvmx_bootmem_named_block_desc_t, field), \
+ SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field), value)
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The argument are generated
+ * by the macros inorder to read only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a uint64_t.
+ */
+static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset, int size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size)
+ {
+ case 4:
+ return cvmx_read64_uint32(base);
+ case 8:
+ return cvmx_read64_uint64(base);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * This function is the implementation of the set macros defined
+ * for individual structure members. The argument are generated
+ * by the macros in order to write only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ * @param value Value to write into the structure
+ */
+static inline void __cvmx_bootmem_desc_set(uint64_t base, int offset, int size, uint64_t value)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size)
+ {
+ case 4:
+ cvmx_write64_uint32(base, value);
+ break;
+ case 8:
+ cvmx_write64_uint64(base, value);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessable.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(uint64_t addr, char *str, int len)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ int l = len;
+ char *ptr = str;
+ addr |= (1ull << 63);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ while (l--)
+ *ptr++ = cvmx_read64_uint8(addr++);
+ str[len] = 0;
+#else
+ extern void octeon_remote_read_mem(void *buffer, uint64_t physical_address, int length);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ octeon_remote_read_mem(str, addr, len);
+ str[len] = 0;
+#endif
+}
+
+/**
+ * This function stores the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessable.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to store into the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_SET_NAME(uint64_t addr, const char *str, int len)
+{
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ int l = len;
+ addr |= (1ull << 63);
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ while (l--)
+ {
+ if (l)
+ cvmx_write64_uint8(addr++, *str++);
+ else
+ cvmx_write64_uint8(addr++, 0);
+ }
+#else
+ extern void octeon_remote_write_mem(uint64_t physical_address, const void *buffer, int length);
+ char zero = 0;
+ addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
+ octeon_remote_write_mem(addr, str, len-1);
+ octeon_remote_write_mem(addr+len-1, &zero, 1);
+#endif
+}
/* See header file for descriptions of functions */
@@ -92,7 +285,101 @@ static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
return(cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)));
}
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(int exact_match)
+{
+ int major_version;
+#ifdef CVMX_BUILD_FOR_LINUX_HOST
+ if (!cvmx_bootmem_desc_addr)
+ cvmx_bootmem_desc_addr = cvmx_read64_uint64(0x24100);
+#endif
+ major_version = CVMX_BOOTMEM_DESC_GET_FIELD(major_version);
+ if ((major_version > 3) || (exact_match && major_version != exact_match))
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: 0x%llx\n",
+ major_version, (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version),
+ (ULL)cvmx_bootmem_desc_addr);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+/**
+ * Get the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this functions should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_lock(uint32_t flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ {
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ /* Unfortunately we can't use the normal cvmx-spinlock code as the
+ memory for the bootmem descriptor may be not accessable by a C
+ pointer. We use a 64bit XKPHYS address to access the memory
+ directly */
+ uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));
+ unsigned int tmp;
+
+ __asm__ __volatile__(
+ ".set noreorder \n"
+ "1: ll %[tmp], 0(%[addr])\n"
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], 0(%[addr])\n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n"
+ : [tmp] "=&r" (tmp)
+ : [addr] "r" (lock_addr)
+ : "memory");
+#endif
+ }
+}
+/**
+ * Release the low level bootmem descriptor lock. If no locking
+ * is specified in the flags, then nothing is done.
+ *
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this functions should do
+ * nothing. This is used to support nested bootmem calls.
+ */
+static inline void __cvmx_bootmem_unlock(uint32_t flags)
+{
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ {
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ /* Unfortunately we can't use the normal cvmx-spinlock code as the
+ memory for the bootmem descriptor may be not accessable by a C
+ pointer. We use a 64bit XKPHYS address to access the memory
+ directly */
+ uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));
+
+ CVMX_SYNCW;
+ __asm__ __volatile__("sw $0, 0(%[addr])\n"
+ :: [addr] "r" (lock_addr)
+ : "memory");
+ CVMX_SYNCW;
+#endif
+ }
+}
+
+/* Some of the cvmx-bootmem functions dealing with C pointers are not supported
+ when we are compiling for CVMX_BUILD_FOR_LINUX_HOST. This ifndef removes
+ these functions when they aren't needed */
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
/* This functions takes an address range and adjusts it as necessary to
** match the ABI that is currently being used. This is required to ensure
** that bootmem_alloc* functions only return valid pointers for 32 bit ABIs */
@@ -152,6 +439,9 @@ void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_a
else
return NULL;
}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_bootmem_alloc_range);
+#endif
void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment)
{
@@ -163,8 +453,11 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
{
return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
}
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+EXPORT_SYMBOL(cvmx_bootmem_alloc);
+#endif
-void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name)
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name)
{
int64_t addr;
@@ -176,23 +469,36 @@ void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t
return NULL;
}
-void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, char *name)
+void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name)
{
return(cvmx_bootmem_alloc_named_range(size, address, address + size, 0, name));
}
-void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name)
{
return(cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name));
}
-int cvmx_bootmem_free_named(char *name)
+int cvmx_bootmem_free_named(const char *name)
{
return(cvmx_bootmem_phy_named_block_free(name, 0));
}
+#endif
-cvmx_bootmem_named_block_desc_t * cvmx_bootmem_find_named_block(char *name)
+const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name)
{
- return(cvmx_bootmem_phy_named_block_find(name, 0));
+ /* FIXME: Returning a single static object is probably a bad thing */
+ static cvmx_bootmem_named_block_desc_t desc;
+ uint64_t named_addr = cvmx_bootmem_phy_named_block_find(name, 0);
+ if (named_addr)
+ {
+ desc.base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, base_addr);
+ desc.size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ strncpy(desc.name, name, sizeof(desc.name));
+ desc.name[sizeof(desc.name)-1] = 0;
+ return &desc;
+ }
+ else
+ return NULL;
}
void cvmx_bootmem_print_named(void)
@@ -200,11 +506,7 @@ void cvmx_bootmem_print_named(void)
cvmx_bootmem_phy_named_block_print();
}
-#if defined(__linux__) && defined(CVMX_ABI_N32)
-cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
-#endif
-
-int cvmx_bootmem_init(void *mem_desc_ptr)
+int cvmx_bootmem_init(uint64_t mem_desc_addr)
{
/* Verify that the size of cvmx_spinlock_t meets our assumptions */
if (sizeof(cvmx_spinlock_t) != 4)
@@ -212,75 +514,8 @@ int cvmx_bootmem_init(void *mem_desc_ptr)
cvmx_dprintf("ERROR: Unexpected size of cvmx_spinlock_t\n");
return(-1);
}
-
- /* Here we set the global pointer to the bootmem descriptor block. This pointer will
- ** be used directly, so we will set it up to be directly usable by the application.
- ** It is set up as follows for the various runtime/ABI combinations:
- ** Linux 64 bit: Set XKPHYS bit
- ** Linux 32 bit: use mmap to create mapping, use virtual address
- ** CVMX 64 bit: use physical address directly
- ** CVMX 32 bit: use physical address directly
- ** Note that the CVMX environment assumes the use of 1-1 TLB mappings so that the physical addresses
- ** can be used directly
- */
- if (!cvmx_bootmem_desc)
- {
-#if defined(CVMX_BUILD_FOR_LINUX_USER) && defined(CVMX_ABI_N32)
- void *base_ptr;
- /* For 32 bit, we need to use mmap to create a mapping for the bootmem descriptor */
- int dm_fd = open("/dev/mem", O_RDWR);
- if (dm_fd < 0)
- {
- cvmx_dprintf("ERROR opening /dev/mem for boot descriptor mapping\n");
- return(-1);
- }
-
- base_ptr = mmap(NULL,
- sizeof(cvmx_bootmem_desc_t) + sysconf(_SC_PAGESIZE),
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- dm_fd,
- ((off_t)mem_desc_ptr) & ~(sysconf(_SC_PAGESIZE) - 1));
-
- if (MAP_FAILED == base_ptr)
- {
- cvmx_dprintf("Error mapping bootmem descriptor!\n");
- close(dm_fd);
- return(-1);
- }
-
- /* Adjust pointer to point to bootmem_descriptor, rather than start of page it is in */
- cvmx_bootmem_desc = (cvmx_bootmem_desc_t*)((char*)base_ptr + (((off_t)mem_desc_ptr) & (sysconf(_SC_PAGESIZE) - 1)));
-
- /* Also setup mapping for named memory block desc. while we are at it. Here we must keep another
- ** pointer around, as the value in the bootmem descriptor is shared with other applications. */
- base_ptr = mmap(NULL,
- sizeof(cvmx_bootmem_named_block_desc_t) * cvmx_bootmem_desc->named_block_num_blocks + sysconf(_SC_PAGESIZE),
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- dm_fd,
- ((off_t)cvmx_bootmem_desc->named_block_array_addr) & ~(sysconf(_SC_PAGESIZE) - 1));
-
- close(dm_fd);
-
- if (MAP_FAILED == base_ptr)
- {
- cvmx_dprintf("Error mapping named block descriptor!\n");
- return(-1);
- }
-
- /* Adjust pointer to point to named block array, rather than start of page it is in */
- linux32_named_block_array_ptr = (cvmx_bootmem_named_block_desc_t*)((char*)base_ptr + (((off_t)cvmx_bootmem_desc->named_block_array_addr) & (sysconf(_SC_PAGESIZE) - 1)));
-
-#elif (defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CVMX_BUILD_FOR_LINUX_USER)) && defined(CVMX_ABI_64)
- /* Set XKPHYS bit */
- cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
-#else
- cvmx_bootmem_desc = (cvmx_bootmem_desc_t*)mem_desc_ptr;
-#endif
- }
-
-
+ if (!cvmx_bootmem_desc_addr)
+ cvmx_bootmem_desc_addr = mem_desc_addr;
return(0);
}
@@ -316,15 +551,11 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
- (unsigned long long)req_size, (unsigned long long)address_min, (unsigned long long)address_max, (unsigned long long)alignment);
+ (ULL)req_size, (ULL)address_min, (ULL)address_max, (ULL)alignment);
#endif
- if (cvmx_bootmem_desc->major_version > 3)
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ if (__cvmx_bootmem_check_version(0))
goto error_out;
- }
/* Do a variety of checks to validate the arguments. The allocator code will later assume
** that these checks have been made. We validate that the requested constraints are not
@@ -369,9 +600,8 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
/* Walk through the list entries - first fit found is returned */
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
- head_addr = cvmx_bootmem_desc->head_addr;
+ __cvmx_bootmem_lock(flags);
+ head_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
ent_addr = head_addr;
while (ent_addr)
{
@@ -381,7 +611,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
if (cvmx_bootmem_phy_get_next(ent_addr) && ent_addr > cvmx_bootmem_phy_get_next(ent_addr))
{
cvmx_dprintf("Internal bootmem_alloc() error: ent: 0x%llx, next: 0x%llx\n",
- (unsigned long long)ent_addr, (unsigned long long)cvmx_bootmem_phy_get_next(ent_addr));
+ (ULL)ent_addr, (ULL)cvmx_bootmem_phy_get_next(ent_addr));
goto error_out;
}
@@ -430,10 +660,9 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
else
{
/* head of list being returned, so update head ptr */
- cvmx_bootmem_desc->head_addr = cvmx_bootmem_phy_get_next(ent_addr);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, cvmx_bootmem_phy_get_next(ent_addr));
}
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(flags);
return(desired_min_addr);
}
@@ -458,8 +687,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
}
error_out:
/* We didn't find anything, so return error */
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(flags);
return(-1);
}
@@ -472,23 +700,18 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
int retval = 0;
#ifdef DEBUG
- cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (unsigned long long)phy_addr, (unsigned long long)size);
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (ULL)phy_addr, (ULL)size);
#endif
- if (cvmx_bootmem_desc->major_version > 3)
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ if (__cvmx_bootmem_check_version(0))
return(0);
- }
/* 0 is not a valid size for this allocator */
if (!size)
return(0);
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
- cur_addr = cvmx_bootmem_desc->head_addr;
+ __cvmx_bootmem_lock(flags);
+ cur_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
if (cur_addr == 0 || phy_addr < cur_addr)
{
/* add at front of list - special case with changing head ptr */
@@ -499,7 +722,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
/* Add to front of existing first block */
cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
- cvmx_bootmem_desc->head_addr = phy_addr;
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
}
else
@@ -507,7 +730,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
/* New block before first block */
cvmx_bootmem_phy_set_next(phy_addr, cur_addr); /* OK if cur_addr is 0 */
cvmx_bootmem_phy_set_size(phy_addr, size);
- cvmx_bootmem_desc->head_addr = phy_addr;
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
}
retval = 1;
goto bootmem_free_done;
@@ -575,8 +798,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
retval = 1;
bootmem_free_done:
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(flags);
return(retval);
}
@@ -587,11 +809,13 @@ void cvmx_bootmem_phy_list_print(void)
{
uint64_t addr;
- addr = cvmx_bootmem_desc->head_addr;
- cvmx_dprintf("\n\n\nPrinting bootmem block list, descriptor: %p, head is 0x%llx\n",
- cvmx_bootmem_desc, (unsigned long long)addr);
- cvmx_dprintf("Descriptor version: %d.%d\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version);
- if (cvmx_bootmem_desc->major_version > 3)
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
+ cvmx_dprintf("\n\n\nPrinting bootmem block list, descriptor: 0x%llx, head is 0x%llx\n",
+ (ULL)cvmx_bootmem_desc_addr, (ULL)addr);
+ cvmx_dprintf("Descriptor version: %d.%d\n",
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(major_version),
+ (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version));
+ if (CVMX_BOOTMEM_DESC_GET_FIELD(major_version) > 3)
{
cvmx_dprintf("Warning: Bootmem descriptor version is newer than expected\n");
}
@@ -602,9 +826,9 @@ void cvmx_bootmem_phy_list_print(void)
while (addr)
{
cvmx_dprintf("Block address: 0x%08qx, size: 0x%08qx, next: 0x%08qx\n",
- (unsigned long long)addr,
- (unsigned long long)cvmx_bootmem_phy_get_size(addr),
- (unsigned long long)cvmx_bootmem_phy_get_next(addr));
+ (ULL)addr,
+ (ULL)cvmx_bootmem_phy_get_size(addr),
+ (ULL)cvmx_bootmem_phy_get_next(addr));
addr = cvmx_bootmem_phy_get_next(addr);
}
cvmx_dprintf("\n\n");
@@ -618,155 +842,130 @@ uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size)
uint64_t available_mem = 0;
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
- addr = cvmx_bootmem_desc->head_addr;
+ __cvmx_bootmem_lock(0);
+ addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
while (addr)
{
if (cvmx_bootmem_phy_get_size(addr) >= min_block_size)
available_mem += cvmx_bootmem_phy_get_size(addr);
addr = cvmx_bootmem_phy_get_next(addr);
}
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(0);
return(available_mem);
}
-cvmx_bootmem_named_block_desc_t * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags)
{
- unsigned int i;
- cvmx_bootmem_named_block_desc_t *named_block_array_ptr;
-
+ uint64_t result = 0;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
- /* Lock the structure to make sure that it is not being changed while we are
- ** examining it.
- */
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
-#if defined(__linux__) && !defined(CONFIG_OCTEON_U_BOOT)
-#ifdef CVMX_ABI_N32
- /* Need to use mmapped named block pointer in 32 bit linux apps */
-extern cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
- named_block_array_ptr = linux32_named_block_array_ptr;
-#else
- /* Use XKPHYS for 64 bit linux */
- named_block_array_ptr = (cvmx_bootmem_named_block_desc_t *)cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
-#endif
-#else
- /* Simple executive case. (and u-boot)
- ** This could be in the low 1 meg of memory that is not 1-1 mapped, so we need use XKPHYS/KSEG0 addressing for it */
- named_block_array_ptr = CASTPTR(cvmx_bootmem_named_block_desc_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,cvmx_bootmem_desc->named_block_array_addr));
-#endif
-
-#ifdef DEBUG
- cvmx_dprintf("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n", named_block_array_ptr);
-#endif
- if (cvmx_bootmem_desc->major_version == 3)
+ __cvmx_bootmem_lock(flags);
+ if (!__cvmx_bootmem_check_version(3))
{
- for (i = 0; i < cvmx_bootmem_desc->named_block_num_blocks; i++)
+ int i;
+ uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ uint64_t named_addr = named_block_array_addr;
+ for (i = 0; i < num_blocks; i++)
{
- if ((name && named_block_array_ptr[i].size && !strncmp(name, named_block_array_ptr[i].name, cvmx_bootmem_desc->named_block_name_len - 1))
- || (!name && !named_block_array_ptr[i].size))
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
+ if (name && named_size)
{
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
- return(&(named_block_array_ptr[i]));
+ char name_tmp[name_length];
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_addr, name_tmp, name_length);
+ if (!strncmp(name, name_tmp, name_length - 1))
+ {
+ result = named_addr;
+ break;
+ }
+ }
+ else if (!name && !named_size)
+ {
+ result = named_addr;
+ break;
}
+ named_addr += sizeof(cvmx_bootmem_named_block_desc_t);
}
}
- else
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
- }
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
- return(NULL);
+ __cvmx_bootmem_unlock(flags);
+ return result;
}
-int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags)
{
- cvmx_bootmem_named_block_desc_t *named_block_ptr;
+ uint64_t named_block_addr;
- if (cvmx_bootmem_desc->major_version != 3)
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ if (__cvmx_bootmem_check_version(3))
return(0);
- }
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
#endif
/* Take lock here, as name lookup/block free/name free need to be atomic */
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_lock(flags);
- named_block_ptr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
- if (named_block_ptr)
+ named_block_addr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_addr)
{
+ uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
#ifdef DEBUG
- cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s, base: 0x%llx, size: 0x%llx\n", name, (unsigned long long)named_block_ptr->base_addr, (unsigned long long)named_block_ptr->size);
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s, base: 0x%llx, size: 0x%llx\n",
+ name, (ULL)named_addr, (ULL)named_size);
#endif
- __cvmx_bootmem_phy_free(named_block_ptr->base_addr, named_block_ptr->size, CVMX_BOOTMEM_FLAG_NO_LOCKING);
- named_block_ptr->size = 0;
+ __cvmx_bootmem_phy_free(named_addr, named_size, CVMX_BOOTMEM_FLAG_NO_LOCKING);
/* Set size to zero to indicate block not used. */
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_addr, size, 0);
}
-
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
- return(!!named_block_ptr); /* 0 on failure, 1 on success */
+ __cvmx_bootmem_unlock(flags);
+ return(!!named_block_addr); /* 0 on failure, 1 on success */
}
-int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name, uint32_t flags)
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags)
{
int64_t addr_allocated;
- cvmx_bootmem_named_block_desc_t *named_block_desc_ptr;
+ uint64_t named_block_desc_addr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: 0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
- (unsigned long long)size,
- (unsigned long long)min_addr,
- (unsigned long long)max_addr,
- (unsigned long long)alignment,
+ (ULL)size,
+ (ULL)min_addr,
+ (ULL)max_addr,
+ (ULL)alignment,
name);
#endif
- if (cvmx_bootmem_desc->major_version != 3)
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
- return(-1);
- }
+ if (__cvmx_bootmem_check_version(3))
+ return(-1);
/* Take lock here, as name lookup/block alloc/name add need to be atomic */
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
- /* Get pointer to first available named block descriptor */
- named_block_desc_ptr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ __cvmx_bootmem_lock(flags);
- /* Check to see if name already in use, return error if name
- ** not available or no more room for blocks.
- */
- if (cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr)
+ named_block_desc_addr = cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_desc_addr)
{
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(flags);
return(-1);
}
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_addr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (!named_block_desc_addr)
+ {
+ __cvmx_bootmem_unlock(flags);
+ return(-1);
+ }
/* Round size up to mult of minimum alignment bytes
** We need the actual size allocated to allow for blocks to be coallesced
@@ -777,15 +976,12 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uin
addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (addr_allocated >= 0)
{
- named_block_desc_ptr->base_addr = addr_allocated;
- named_block_desc_ptr->size = size;
- strncpy(named_block_desc_ptr->name, name, cvmx_bootmem_desc->named_block_name_len);
- named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, base_addr, addr_allocated);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, size, size);
+ CVMX_BOOTMEM_NAMED_SET_NAME(named_block_desc_addr, name, CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len));
}
- if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
-
+ __cvmx_bootmem_unlock(flags);
return(addr_allocated);
}
@@ -794,45 +990,34 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uin
void cvmx_bootmem_phy_named_block_print(void)
{
- unsigned int i;
+ int i;
int printed = 0;
-#if defined(__linux__) && !defined(CONFIG_OCTEON_U_BOOT)
-#ifdef CVMX_ABI_N32
- /* Need to use mmapped named block pointer in 32 bit linux apps */
-extern cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
- cvmx_bootmem_named_block_desc_t *named_block_array_ptr = linux32_named_block_array_ptr;
-#else
- /* Use XKPHYS for 64 bit linux */
- cvmx_bootmem_named_block_desc_t *named_block_array_ptr = (cvmx_bootmem_named_block_desc_t *)cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
-#endif
-#else
- /* Simple executive case. (and u-boot)
- ** This could be in the low 1 meg of memory that is not 1-1 mapped, so we need use XKPHYS/KSEG0 addressing for it */
- cvmx_bootmem_named_block_desc_t *named_block_array_ptr = CASTPTR(cvmx_bootmem_named_block_desc_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,cvmx_bootmem_desc->named_block_array_addr));
-#endif
+ uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
+ int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
+ int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
+ uint64_t named_block_addr = named_block_array_addr;
+
#ifdef DEBUG
- cvmx_dprintf("cvmx_bootmem_phy_named_block_print, desc addr: %p\n", cvmx_bootmem_desc);
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_print, desc addr: 0x%llx\n",
+ (ULL)cvmx_bootmem_desc_addr);
#endif
- if (cvmx_bootmem_desc->major_version != 3)
- {
- cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
- (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ if (__cvmx_bootmem_check_version(3))
return;
- }
cvmx_dprintf("List of currently allocated named bootmem blocks:\n");
- for (i = 0; i < cvmx_bootmem_desc->named_block_num_blocks; i++)
+ for (i = 0; i < num_blocks; i++)
{
- if (named_block_array_ptr[i].size)
+ uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
+ if (named_size)
{
+ char name_tmp[name_length];
+ uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
+ CVMX_BOOTMEM_NAMED_GET_NAME(named_block_addr, name_tmp, name_length);
printed++;
cvmx_dprintf("Name: %s, address: 0x%08qx, size: 0x%08qx, index: %d\n",
- named_block_array_ptr[i].name,
- (unsigned long long)named_block_array_ptr[i].base_addr,
- (unsigned long long)named_block_array_ptr[i].size,
- i);
-
+ name_tmp, (ULL)named_addr, (ULL)named_size, i);
}
+ named_block_addr += sizeof(cvmx_bootmem_named_block_desc_t);
}
if (!printed)
{
@@ -845,18 +1030,20 @@ extern cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
/* Real physical addresses of memory regions */
#define OCTEON_DDR0_BASE (0x0ULL)
#define OCTEON_DDR0_SIZE (0x010000000ULL)
-#define OCTEON_DDR1_BASE (0x410000000ULL)
+#define OCTEON_DDR1_BASE (OCTEON_IS_MODEL(OCTEON_CN6XXX) ? 0x20000000ULL : 0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)
-#define OCTEON_DDR2_BASE (0x020000000ULL)
-#define OCTEON_DDR2_SIZE (0x3e0000000ULL)
-#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
+#define OCTEON_DDR2_BASE (OCTEON_IS_MODEL(OCTEON_CN6XXX) ? 0x30000000ULL : 0x20000000ULL)
+#define OCTEON_DDR2_SIZE (OCTEON_IS_MODEL(OCTEON_CN6XXX) ? 0x7d0000000ULL : 0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE (OCTEON_IS_MODEL(OCTEON_CN63XX) ? 32*1024*1024*1024ULL : 16*1024*1024*1024ULL)
int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer)
{
uint64_t cur_block_addr;
int64_t addr;
+ int i;
#ifdef DEBUG
- cvmx_dprintf("cvmx_bootmem_phy_mem_list_init (arg desc ptr: %p, cvmx_bootmem_desc: %p)\n", desc_buffer, cvmx_bootmem_desc);
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init (arg desc ptr: %p, cvmx_bootmem_desc: 0x%llx)\n",
+ desc_buffer, (ULL)cvmx_bootmem_desc_addr);
#endif
/* Descriptor buffer needs to be in 32 bit addressable space to be compatible with
@@ -873,21 +1060,27 @@ int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_
cvmx_dprintf("ERROR: requested memory size too large, truncating to maximum size\n");
}
- if (cvmx_bootmem_desc)
+ if (cvmx_bootmem_desc_addr)
return 1;
/* Initialize cvmx pointer to descriptor */
- cvmx_bootmem_init(desc_buffer);
-
- /* Set up global pointer to start of list, exclude low 64k for exception vectors, space for global descriptor */
- memset(cvmx_bootmem_desc, 0x0, sizeof(cvmx_bootmem_desc_t));
- /* Set version of bootmem descriptor */
- cvmx_bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
- cvmx_bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+ cvmx_bootmem_init(cvmx_ptr_to_phys(desc_buffer));
+#else
+ cvmx_bootmem_init((unsigned long)desc_buffer);
+#endif
- cur_block_addr = cvmx_bootmem_desc->head_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);
+ /* Fill the bootmem descriptor */
+ CVMX_BOOTMEM_DESC_SET_FIELD(lock, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(flags, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(major_version, CVMX_BOOTMEM_DESC_MAJ_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(minor_version, CVMX_BOOTMEM_DESC_MIN_VER);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, 0);
+ CVMX_BOOTMEM_DESC_SET_FIELD(app_data_size, 0);
- cvmx_bootmem_desc->head_addr = 0;
+ /* Set up global pointer to start of list, exclude low 64k for exception vectors, space for global descriptor */
+ cur_block_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);
if (mem_size <= OCTEON_DDR0_SIZE)
{
@@ -913,24 +1106,30 @@ int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_
frees_done:
/* Initialize the named block structure */
- cvmx_bootmem_desc->named_block_name_len = CVMX_BOOTMEM_NAME_LEN;
- cvmx_bootmem_desc->named_block_num_blocks = CVMX_BOOTMEM_NUM_NAMED_BLOCKS;
- cvmx_bootmem_desc->named_block_array_addr = 0;
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_name_len, CVMX_BOOTMEM_NAME_LEN);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_num_blocks, CVMX_BOOTMEM_NUM_NAMED_BLOCKS);
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, 0);
/* Allocate this near the top of the low 256 MBytes of memory */
addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t),0, 0x10000000, 0 ,CVMX_BOOTMEM_FLAG_END_ALLOC);
if (addr >= 0)
- cvmx_bootmem_desc->named_block_array_addr = addr;
+ CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, addr);
#ifdef DEBUG
- cvmx_dprintf("cvmx_bootmem_phy_mem_list_init: named_block_array_addr: 0x%llx)\n", (unsigned long long)cvmx_bootmem_desc->named_block_array_addr);
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init: named_block_array_addr: 0x%llx)\n",
+ (ULL)addr);
#endif
- if (!cvmx_bootmem_desc->named_block_array_addr)
+ if (!addr)
{
cvmx_dprintf("FATAL ERROR: unable to allocate memory for bootmem descriptor!\n");
return(0);
}
- memset((void *)(unsigned long)cvmx_bootmem_desc->named_block_array_addr, 0x0, CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t));
+ for (i=0; i<CVMX_BOOTMEM_NUM_NAMED_BLOCKS; i++)
+ {
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, base_addr, 0);
+ CVMX_BOOTMEM_NAMED_SET_FIELD(addr, size, 0);
+ addr += sizeof(cvmx_bootmem_named_block_desc_t);
+ }
return(1);
}
@@ -938,15 +1137,17 @@ frees_done:
void cvmx_bootmem_lock(void)
{
- cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_lock(0);
}
void cvmx_bootmem_unlock(void)
{
- cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ __cvmx_bootmem_unlock(0);
}
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
void *__cvmx_bootmem_internal_get_desc_ptr(void)
{
- return(cvmx_bootmem_desc);
+ return cvmx_phys_to_ptr(cvmx_bootmem_desc_addr);
}
+#endif
diff --git a/cvmx-bootmem.h b/cvmx-bootmem.h
index b44ea02752bb..2f07990e5c12 100644
--- a/cvmx-bootmem.h
+++ b/cvmx-bootmem.h
@@ -1,51 +1,53 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
- *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
/**
* @file
* Simple allocate only memory allocator. Used to allocate memory at application
* start time.
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 49448 $<hr>
*
*/
@@ -89,14 +91,14 @@ typedef struct
** Note: This structure must be naturally 64 bit aligned, as a single
** memory image will be used by both 32 and 64 bit programs.
*/
-typedef struct
+struct cvmx_bootmem_named_block_desc
{
uint64_t base_addr; /**< Base address of named block */
uint64_t size; /**< Size actually allocated for named block (may differ from requested) */
char name[CVMX_BOOTMEM_NAME_LEN]; /**< name of named block */
-} cvmx_bootmem_named_block_desc_t;
-
+};
+typedef struct cvmx_bootmem_named_block_desc cvmx_bootmem_named_block_desc_t;
/* Current descriptor versions */
#define CVMX_BOOTMEM_DESC_MAJ_VER 3 /* CVMX bootmem descriptor major version */
@@ -127,10 +129,10 @@ typedef struct
* Initialize the boot alloc memory structures. This is
* normally called inside of cvmx_user_app_init()
*
- * @param mem_desc_ptr Address of the free memory list
+ * @param mem_desc_addr Address of the free memory list
* @return
*/
-extern int cvmx_bootmem_init(void *mem_desc_ptr);
+extern int cvmx_bootmem_init(uint64_t mem_desc_addr);
/**
@@ -190,7 +192,7 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_
*
* @return pointer to block of memory, NULL on error
*/
-extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name);
+extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name);
@@ -207,7 +209,7 @@ extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *n
*
* @return pointer to block of memory, NULL on error
*/
-extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, char *name);
+extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name);
@@ -226,7 +228,7 @@ extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, c
*
* @return pointer to block of memory, NULL on error
*/
-extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name);
+extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name);
/**
* Frees a previously allocated named bootmem block.
@@ -236,7 +238,7 @@ extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, ui
* @return 0 on failure,
* !0 on success
*/
-extern int cvmx_bootmem_free_named(char *name);
+extern int cvmx_bootmem_free_named(const char *name);
/**
@@ -247,7 +249,7 @@ extern int cvmx_bootmem_free_named(char *name);
* @return pointer to named block descriptor on success
* 0 on failure
*/
-cvmx_bootmem_named_block_desc_t * cvmx_bootmem_find_named_block(char *name);
+const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name);
@@ -310,7 +312,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t
*
* @return physical address of block allocated, or -1 on failure
*/
-int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name, uint32_t flags);
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags);
/**
@@ -319,13 +321,13 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uin
*
* @param name Name of memory block to find.
* If NULL pointer given, then finds unused descriptor, if available.
- * @param flags Flags to control options for the allocation.
+ * @param flags Flags to control options for the allocation.
*
- * @return Pointer to memory block descriptor, NULL if not found.
- * If NULL returned when name parameter is NULL, then no memory
- * block descriptors are available.
+ * @return Physical address of the memory block descriptor, zero if not
+ * found. If zero returned when name parameter is NULL, then no
+ * memory block descriptors are available.
*/
-cvmx_bootmem_named_block_desc_t * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
+uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags);
/**
@@ -349,7 +351,7 @@ uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size);
* @return 0 on failure
* 1 on success
*/
-int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
+int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags);
/**
* Frees a block to the bootmem allocator list. This must
diff --git a/cvmx-ciu-defs.h b/cvmx-ciu-defs.h
new file mode 100644
index 000000000000..05c03ff4eef2
--- /dev/null
+++ b/cvmx-ciu-defs.h
@@ -0,0 +1,5527 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-ciu-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ciu.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision$<hr>
+ *
+ */
+#ifndef __CVMX_CIU_TYPEDEFS_H__
+#define __CVMX_CIU_TYPEDEFS_H__
+
+#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_BLOCK_INT CVMX_CIU_BLOCK_INT_FUNC()
+static inline uint64_t CVMX_CIU_BLOCK_INT_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_BLOCK_INT not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007C0ull);
+}
+#else
+#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
+#endif
+#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
+#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
+#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_INT33_SUM0 CVMX_CIU_INT33_SUM0_FUNC()
+static inline uint64_t CVMX_CIU_INT33_SUM0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_INT33_SUM0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000110ull);
+}
+#else
+#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN0_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN1_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || ((offset >= 32) && (offset <= 33))))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1C(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1C(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1S(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1S(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16;
+}
+#else
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_SUM0(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 11) || (offset == 32)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM0(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8;
+}
+#else
+#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_INTX_SUM4(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM4(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_INT_DBG_SEL CVMX_CIU_INT_DBG_SEL_FUNC()
+static inline uint64_t CVMX_CIU_INT_DBG_SEL_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_INT_DBG_SEL not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x00010700000007D0ull);
+}
+#else
+#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
+#endif
+#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_MBOX_CLRX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_MBOX_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_MBOX_SETX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_MBOX_SETX(offset) (CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8)
+#endif
+#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
+#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
+#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_PP_POKEX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_PP_POKEX(offset) (CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8)
+#endif
+#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM0 CVMX_CIU_QLM0_FUNC()
+static inline uint64_t CVMX_CIU_QLM0_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_QLM0 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000780ull);
+}
+#else
+#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM1 CVMX_CIU_QLM1_FUNC()
+static inline uint64_t CVMX_CIU_QLM1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_QLM1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000788ull);
+}
+#else
+#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM2 CVMX_CIU_QLM2_FUNC()
+static inline uint64_t CVMX_CIU_QLM2_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_QLM2 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000790ull);
+}
+#else
+#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_DCOK CVMX_CIU_QLM_DCOK_FUNC()
+static inline uint64_t CVMX_CIU_QLM_DCOK_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_QLM_DCOK not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000760ull);
+}
+#else
+#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_QLM_JTGC_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGC_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGC not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000768ull);
+}
+#else
+#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_QLM_JTGD_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGD_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGD not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000770ull);
+}
+#else
+#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
+#endif
+#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
+#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_SOFT_PRST1_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST1_FUNC(void)
+{
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
+ cvmx_warn("CVMX_CIU_SOFT_PRST1 not supported on this chip\n");
+ return CVMX_ADD_IO_SEG(0x0001070000000758ull);
+}
+#else
+#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
+#endif
+#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_TIMX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_TIMX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 3) * 8;
+}
+#else
+#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 3) * 8)
+#endif
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+{
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
+ cvmx_warn("CVMX_CIU_WDOGX(%lu) is invalid on this chip\n", offset);
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8;
+}
+#else
+#define CVMX_CIU_WDOGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8)
+#endif
+
+/**
+ * cvmx_ciu_bist
+ */
+union cvmx_ciu_bist
+{
+ uint64_t u64;
+ struct cvmx_ciu_bist_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_5_63 : 59;
+ uint64_t bist : 5; /**< BIST Results.
+ HW sets a bit in BIST for for memory that fails
+ BIST. */
+#else
+ uint64_t bist : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+ struct cvmx_ciu_bist_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t bist : 4; /**< BIST Results.
+ HW sets a bit in BIST for for memory that fails
+ BIST. */
+#else
+ uint64_t bist : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_bist_cn30xx cn31xx;
+ struct cvmx_ciu_bist_cn30xx cn38xx;
+ struct cvmx_ciu_bist_cn30xx cn38xxp2;
+ struct cvmx_ciu_bist_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t bist : 2; /**< BIST Results.
+ HW sets a bit in BIST for for memory that fails
+ BIST. */
+#else
+ uint64_t bist : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_bist_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_3_63 : 61;
+ uint64_t bist : 3; /**< BIST Results.
+ HW sets a bit in BIST for for memory that fails
+ BIST. */
+#else
+ uint64_t bist : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_bist_cn52xx cn52xxp1;
+ struct cvmx_ciu_bist_cn30xx cn56xx;
+ struct cvmx_ciu_bist_cn30xx cn56xxp1;
+ struct cvmx_ciu_bist_cn30xx cn58xx;
+ struct cvmx_ciu_bist_cn30xx cn58xxp1;
+ struct cvmx_ciu_bist_s cn63xx;
+ struct cvmx_ciu_bist_s cn63xxp1;
+};
+typedef union cvmx_ciu_bist cvmx_ciu_bist_t;
+
+/**
+ * cvmx_ciu_block_int
+ *
+ * CIU_BLOCK_INT = CIU Blocks Interrupt
+ *
+ * The interrupt lines from the various chip blocks.
+ */
+union cvmx_ciu_block_int
+{
+ uint64_t u64;
+ struct cvmx_ciu_block_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_43_63 : 21;
+ uint64_t ptp : 1; /**< PTP interrupt
+ See CIU_INT_SUM1[PTP] */
+ uint64_t dpi : 1; /**< DPI interrupt
+ See DPI_INT_REG */
+ uint64_t dfm : 1; /**< DFM interrupt
+ See DFM_FNT_STAT */
+ uint64_t reserved_34_39 : 6;
+ uint64_t srio1 : 1; /**< SRIO1 interrupt
+ See SRIO1_INT_REG */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt
+ See SRIO0_INT_REG */
+ uint64_t reserved_31_31 : 1;
+ uint64_t iob : 1; /**< IOB interrupt
+ See IOB_INT_SUM */
+ uint64_t reserved_29_29 : 1;
+ uint64_t agl : 1; /**< AGL interrupt
+ See AGL_GMX_RX*_INT_REG, AGL_GMX_TX_INT_REG */
+ uint64_t reserved_27_27 : 1;
+ uint64_t pem1 : 1; /**< PEM1 interrupt
+ See PEM1_INT_SUM (enabled by PEM1_INT_ENB) */
+ uint64_t pem0 : 1; /**< PEM0 interrupt
+ See PEM0_INT_SUM (enabled by PEM0_INT_ENB) */
+ uint64_t reserved_23_24 : 2;
+ uint64_t asxpcs0 : 1; /**< See PCS0_INT*_REG, PCSX0_INT_REG */
+ uint64_t reserved_21_21 : 1;
+ uint64_t pip : 1; /**< PIP interrupt
+ See PIP_INT_REG */
+ uint64_t reserved_18_19 : 2;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt
+ See LMC0_INT */
+ uint64_t l2c : 1; /**< L2C interrupt
+ See L2C_INT_REG */
+ uint64_t reserved_15_15 : 1;
+ uint64_t rad : 1; /**< RAD interrupt
+ See RAD_REG_ERROR */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt
+ See UCTL0_INT_REG */
+ uint64_t pow : 1; /**< POW err interrupt
+ See POW_ECC_ERR */
+ uint64_t tim : 1; /**< TIM interrupt
+ See TIM_REG_ERROR */
+ uint64_t pko : 1; /**< PKO interrupt
+ See PKO_REG_ERROR */
+ uint64_t ipd : 1; /**< IPD interrupt
+ See IPD_INT_SUM */
+ uint64_t reserved_8_8 : 1;
+ uint64_t zip : 1; /**< ZIP interrupt
+ See ZIP_ERROR */
+ uint64_t dfa : 1; /**< DFA interrupt
+ See DFA_ERROR */
+ uint64_t fpa : 1; /**< FPA interrupt
+ See FPA_INT_SUM */
+ uint64_t key : 1; /**< KEY interrupt
+ See KEY_INT_SUM */
+ uint64_t sli : 1; /**< SLI interrupt
+ See SLI_INT_SUM (enabled by SLI_INT_ENB_CIU) */
+ uint64_t reserved_2_2 : 1;
+ uint64_t gmx0 : 1; /**< GMX0 interrupt
+ See GMX0_RX*_INT_REG, GMX0_TX_INT_REG */
+ uint64_t mio : 1; /**< MIO boot interrupt
+ See MIO_BOOT_ERR */
+#else
+ uint64_t mio : 1;
+ uint64_t gmx0 : 1;
+ uint64_t reserved_2_2 : 1;
+ uint64_t sli : 1;
+ uint64_t key : 1;
+ uint64_t fpa : 1;
+ uint64_t dfa : 1;
+ uint64_t zip : 1;
+ uint64_t reserved_8_8 : 1;
+ uint64_t ipd : 1;
+ uint64_t pko : 1;
+ uint64_t tim : 1;
+ uint64_t pow : 1;
+ uint64_t usb : 1;
+ uint64_t rad : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t l2c : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_18_19 : 2;
+ uint64_t pip : 1;
+ uint64_t reserved_21_21 : 1;
+ uint64_t asxpcs0 : 1;
+ uint64_t reserved_23_24 : 2;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t reserved_27_27 : 1;
+ uint64_t agl : 1;
+ uint64_t reserved_29_29 : 1;
+ uint64_t iob : 1;
+ uint64_t reserved_31_31 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t reserved_34_39 : 6;
+ uint64_t dfm : 1;
+ uint64_t dpi : 1;
+ uint64_t ptp : 1;
+ uint64_t reserved_43_63 : 21;
+#endif
+ } s;
+ struct cvmx_ciu_block_int_s cn63xx;
+ struct cvmx_ciu_block_int_s cn63xxp1;
+};
+typedef union cvmx_ciu_block_int cvmx_ciu_block_int_t;
+
+/**
+ * cvmx_ciu_dint
+ *
+ * Debug interrupt register: each DINT bit sends a DINT pulse to the
+ * corresponding PP (core) vector.  The width of the DINT field varies
+ * with the chip model (1 bit on CN30xx up to 16 bits on CN38xx/CN58xx),
+ * hence the model-specific layout structs below.  The #else arm of each
+ * struct is the little-endian mirror of the big-endian field list.
+ */
+union cvmx_ciu_dint
+{
+ uint64_t u64;
+ struct cvmx_ciu_dint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t dint : 16; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_dint_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t dint : 1; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_dint_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t dint : 2; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_dint_s cn38xx;
+ struct cvmx_ciu_dint_s cn38xxp2;
+ struct cvmx_ciu_dint_cn31xx cn50xx;
+ struct cvmx_ciu_dint_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t dint : 4; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_dint_cn52xx cn52xxp1;
+ struct cvmx_ciu_dint_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t dint : 12; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_dint_cn56xx cn56xxp1;
+ struct cvmx_ciu_dint_s cn58xx;
+ struct cvmx_ciu_dint_s cn58xxp1;
+ struct cvmx_ciu_dint_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t dint : 6; /**< Send DINT pulse to PP vector */
+#else
+ uint64_t dint : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_dint_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_dint cvmx_ciu_dint_t;
+
+/**
+ * cvmx_ciu_fuse
+ *
+ * Fuse register: one FUSE bit per physical PP (core) indicating that the
+ * PP is present.  The FUSE field width varies with the chip model (1 bit
+ * on CN30xx up to 16 bits on CN38xx/CN58xx), hence the model-specific
+ * layout structs below.
+ */
+union cvmx_ciu_fuse
+{
+ uint64_t u64;
+ struct cvmx_ciu_fuse_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t fuse : 16; /**< Physical PP is present */
+#else
+ uint64_t fuse : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } s;
+ struct cvmx_ciu_fuse_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t fuse : 1; /**< Physical PP is present */
+#else
+ uint64_t fuse : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_fuse_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t fuse : 2; /**< Physical PP is present */
+#else
+ uint64_t fuse : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_fuse_s cn38xx;
+ struct cvmx_ciu_fuse_s cn38xxp2;
+ struct cvmx_ciu_fuse_cn31xx cn50xx;
+ struct cvmx_ciu_fuse_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_4_63 : 60;
+ uint64_t fuse : 4; /**< Physical PP is present */
+#else
+ uint64_t fuse : 4;
+ uint64_t reserved_4_63 : 60;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_fuse_cn52xx cn52xxp1;
+ struct cvmx_ciu_fuse_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t fuse : 12; /**< Physical PP is present */
+#else
+ uint64_t fuse : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_fuse_cn56xx cn56xxp1;
+ struct cvmx_ciu_fuse_s cn58xx;
+ struct cvmx_ciu_fuse_s cn58xxp1;
+ struct cvmx_ciu_fuse_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_6_63 : 58;
+ uint64_t fuse : 6; /**< Physical PP is present */
+#else
+ uint64_t fuse : 6;
+ uint64_t reserved_6_63 : 58;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_fuse_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_fuse cvmx_ciu_fuse_t;
+
+/**
+ * cvmx_ciu_gstop
+ *
+ * Global-stop register: a single GSTOP bit in bit 0.  The layout is
+ * identical on every supported chip model, so all per-model members
+ * alias the common cvmx_ciu_gstop_s layout.
+ */
+union cvmx_ciu_gstop
+{
+ uint64_t u64;
+ struct cvmx_ciu_gstop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t gstop : 1; /**< GSTOP bit */
+#else
+ uint64_t gstop : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+ struct cvmx_ciu_gstop_s cn30xx;
+ struct cvmx_ciu_gstop_s cn31xx;
+ struct cvmx_ciu_gstop_s cn38xx;
+ struct cvmx_ciu_gstop_s cn38xxp2;
+ struct cvmx_ciu_gstop_s cn50xx;
+ struct cvmx_ciu_gstop_s cn52xx;
+ struct cvmx_ciu_gstop_s cn52xxp1;
+ struct cvmx_ciu_gstop_s cn56xx;
+ struct cvmx_ciu_gstop_s cn56xxp1;
+ struct cvmx_ciu_gstop_s cn58xx;
+ struct cvmx_ciu_gstop_s cn58xxp1;
+ struct cvmx_ciu_gstop_s cn63xx;
+ struct cvmx_ciu_gstop_s cn63xxp1;
+};
+typedef union cvmx_ciu_gstop cvmx_ciu_gstop_t;
+
+/**
+ * cvmx_ciu_int#_en0
+ *
+ * Notes:
+ * CIU_INT0_EN0: PP0 /IP2
+ * CIU_INT1_EN0: PP0 /IP3
+ * ...
+ * CIU_INT6_EN0: PP3/IP2
+ * CIU_INT7_EN0: PP3/IP3
+ * (hole)
+ * CIU_INT32_EN0: PCI /IP
+ *
+ * Interrupt-enable register 0; one register per PP interrupt line as
+ * listed above, plus one for PCI.  The union overlays the raw 64-bit
+ * value with per-chip-model bitfield layouts; bits a given model does
+ * not implement appear as reserved_* fields in that model's struct.
+ */
+union cvmx_ciu_intx_en0
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox/PCIe/sRIO interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en0_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en0_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_en0_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en0_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en0 cvmx_ciu_intx_en0_t;
+
+/**
+ * cvmx_ciu_int#_en0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN0 register
+ *
+ * Writing a 1 to a bit clears the corresponding enable in CIU_INTx_EN0;
+ * the bit layout matches CIU_INTx_EN0.  Per-chip-model layouts below
+ * mark unimplemented bits as reserved_* fields.
+ */
+union cvmx_ciu_intx_en0_w1c
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clr RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox/PCIe/sRIO interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en0_w1c cvmx_ciu_intx_en0_w1c_t;
+
+/**
+ * cvmx_ciu_int#_en0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN0 register
+ *
+ * Writing a 1 to a bit sets the corresponding enable in CIU_INTx_EN0;
+ * the bit layout matches CIU_INTx_EN0.  Per-chip-model layouts below
+ * mark unimplemented bits as reserved_* fields.
+ */
+union cvmx_ciu_intx_en0_w1s
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox/PCIe/sRIO interrupt
+ enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en0_w1s cvmx_ciu_intx_en0_w1s_t;
+
+/**
+ * cvmx_ciu_int#_en1
+ *
+ * Notes:
+ * @verbatim
+ * PPx/IP2 will be raised when...
+ *
+ *    n = x*2
+ *    PPx/IP2 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PPx/IP3 will be raised when...
+ *
+ *    n = x*2 + 1
+ *    PPx/IP3 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0])
+ *
+ * PCI/INT will be raised when...
+ *
+ *    PCI/INT = |([CIU_INT_SUM1, CIU_INT32_SUM0] & [CIU_INT32_EN1, CIU_INT32_EN0])
+ * @endverbatim
+ *
+ * Interrupt-enable register 1 (paired with CIU_INTx_EN0, see formulas
+ * above).  The low bits are per-core watchdog enables; the WDOG field
+ * width and the set of block-level enables vary per chip model, hence
+ * the model-specific layout structs below.
+ */
+union cvmx_ciu_intx_en1
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_cn30xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_1_63 : 63;
+ uint64_t wdog : 1; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } cn30xx;
+ struct cvmx_ciu_intx_en1_cn31xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_2_63 : 62;
+ uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 2;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } cn31xx;
+ struct cvmx_ciu_intx_en1_cn38xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn38xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en1_cn31xx cn50xx;
+ struct cvmx_ciu_intx_en1_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_cn52xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_19_63 : 45;
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t reserved_19_63 : 45;
+#endif
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en1_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en1_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< PTP interrupt enable */
+ uint64_t agl : 1; /**< AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< DPI interrupt enable */
+ uint64_t sli : 1; /**< SLI interrupt enable */
+ uint64_t usb : 1; /**< USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< DFA interrupt enable */
+ uint64_t key : 1; /**< KEY interrupt enable */
+ uint64_t rad : 1; /**< RAD interrupt enable */
+ uint64_t tim : 1; /**< TIM interrupt enable */
+ uint64_t zip : 1; /**< ZIP interrupt enable */
+ uint64_t pko : 1; /**< PKO interrupt enable */
+ uint64_t pip : 1; /**< PIP interrupt enable */
+ uint64_t ipd : 1; /**< IPD interrupt enable */
+ uint64_t l2c : 1; /**< L2C interrupt enable */
+ uint64_t pow : 1; /**< POW err interrupt enable */
+ uint64_t fpa : 1; /**< FPA interrupt enable */
+ uint64_t iob : 1; /**< IOB interrupt enable */
+ uint64_t mio : 1; /**< MIO boot interrupt enable */
+ uint64_t nand : 1; /**< NAND Flash Controller interrupt enable */
+ uint64_t mii1 : 1; /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en1 cvmx_ciu_intx_en1_t;
+
+/**
+ * cvmx_ciu_int#_en1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN1 register
+ *
+ */
+union cvmx_ciu_intx_en1_w1c
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1c_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en1_w1c cvmx_ciu_intx_en1_w1c_t;
+
+/**
+ * cvmx_ciu_int#_en1_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN1 register
+ *
+ */
+union cvmx_ciu_intx_en1_w1s
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en1_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1s_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to set MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to set DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to set LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to set SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to set SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to set PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to set PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to set PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to set AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to set GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to set DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to set SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to set USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to set DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to set KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to set RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to set TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to set ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to set PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to set PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to set IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to set L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to set POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to set FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to set IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to set MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to set NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to set RGMII/MII/MIX Interface 1 Interrupt
+ enable */
+ uint64_t reserved_6_17 : 12;
+ uint64_t wdog : 6; /**< Write 1s to set Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 6;
+ uint64_t reserved_6_17 : 12;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en1_w1s cvmx_ciu_intx_en1_w1s_t;
+
+/**
+ * cvmx_ciu_int#_en4_0
+ *
+ * Notes:
+ * CIU_INT0_EN4_0: PP0 /IP4
+ * CIU_INT1_EN4_0: PP1 /IP4
+ * ...
+ * CIU_INT11_EN4_0: PP11 /IP4
+ */
+union cvmx_ciu_intx_en4_0
+{
+ /* NOTE: in every struct below the bit-field order is mirrored between the
+    big-endian and little-endian branches of the #if/#else. */
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt enable */
+ uint64_t mii : 1; /**< RGMII/MII/MIX Interface 0 Interrupt enable */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt enable */
+ uint64_t powiq : 1; /**< POW IQ interrupt enable */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt enable */
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB EHCI or OHCI Interrupt enable */
+ uint64_t timer : 4; /**< General timer interrupt enables */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop interrupt enable */
+ uint64_t gmx_drp : 2; /**< GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< RML Interrupt enable */
+ uint64_t twsi : 1; /**< TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Two UART interrupt enables */
+ uint64_t mbox : 2; /**< Two mailbox interrupt enables */
+ uint64_t gpio : 16; /**< 16 GPIO interrupt enables */
+ uint64_t workq : 16; /**< 16 work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_cn50xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_59_63 : 5;
+ uint64_t mpi : 1; /**< MPI/SPI interrupt */
+ uint64_t pcm : 1; /**< PCM/TDM interrupt */
+ uint64_t usb : 1; /**< USB interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t reserved_47_47 : 1;
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t reserved_47_47 : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t pcm : 1;
+ uint64_t mpi : 1;
+ uint64_t reserved_59_63 : 5;
+#endif
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en4_0_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_0_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en4_0 cvmx_ciu_intx_en4_0_t;
+
+/**
+ * cvmx_ciu_int#_en4_0_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN4_0 register
+ *
+ */
+union cvmx_ciu_intx_en4_0_w1c
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Write 1 to clear Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to clear IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to clear POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to clear 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< Write 1 to clear USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to clear General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< Write 1 to clear IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to clear GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to clear Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to clear RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to clear TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to clear PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to clear PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to clear UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to clear mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to clear GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to clear work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en4_0_w1c cvmx_ciu_intx_en4_0_w1c_t;
+typedef union cvmx_ciu_intx_en4_0_w1c cvmx_ciu_intx_en4_0_w1c_t;
+
+/**
+ * cvmx_ciu_int#_en4_0_w1s
+ *
+ * Notes:
+ * Write-1-to-set version of the CIU_INTx_EN4_0 register
+ *
+ */
+union cvmx_ciu_intx_en4_0_w1s
+{
+ /* NOTE: in every struct below the bit-field order is mirrored between the
+    big-endian and little-endian branches of the #if/#else. */
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Write 1 to set Boot bus DMA engines Interrupt
+ enable */
+ uint64_t mii : 1; /**< Write 1 to set RGMII/MII/MIX Interface 0 Interrupt
+ enable */
+ uint64_t ipdppthr : 1; /**< Write 1 to set IPD per-port counter threshold
+ interrupt enable */
+ uint64_t powiq : 1; /**< Write 1 to set POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< Write 1 to set 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< Write 1 to set USB EHCI or OHCI Interrupt */
+ uint64_t timer : 4; /**< Write 1 to set General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< Write 1 to set IPD QOS packet drop interrupt
+ enable */
+ uint64_t gmx_drp : 2; /**< Write 1 to set GMX packet drop interrupt enable */
+ uint64_t trace : 1; /**< Write 1 to set Trace buffer interrupt enable */
+ uint64_t rml : 1; /**< Write 1 to set RML Interrupt enable */
+ uint64_t twsi : 1; /**< Write 1 to set TWSI Interrupt enable */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< Write 1s to set PCIe/sRIO MSI enables */
+ uint64_t pci_int : 4; /**< Write 1s to set PCIe INTA/B/C/D enables */
+ uint64_t uart : 2; /**< Write 1s to set UART interrupt enables */
+ uint64_t mbox : 2; /**< Write 1s to set mailbox interrupt enables */
+ uint64_t gpio : 16; /**< Write 1s to set GPIO interrupt enables */
+ uint64_t workq : 16; /**< Write 1s to set work queue interrupt enables */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */
+ uint64_t mii : 1; /**< MII Interface Interrupt */
+ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */
+ uint64_t powiq : 1; /**< POW IQ interrupt */
+ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */
+ uint64_t reserved_57_58 : 2;
+ uint64_t usb : 1; /**< USB Interrupt */
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t reserved_51_51 : 1;
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t reserved_49_49 : 1;
+ uint64_t gmx_drp : 1; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 1;
+ uint64_t reserved_49_49 : 1;
+ uint64_t ipd_drp : 1;
+ uint64_t reserved_51_51 : 1;
+ uint64_t timer : 4;
+ uint64_t usb : 1;
+ uint64_t reserved_57_58 : 2;
+ uint64_t twsi2 : 1;
+ uint64_t powiq : 1;
+ uint64_t ipdppthr : 1;
+ uint64_t mii : 1;
+ uint64_t bootdma : 1;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_56_63 : 8;
+ uint64_t timer : 4; /**< General timer interrupts */
+ uint64_t key_zero : 1; /**< Key Zeroization interrupt */
+ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */
+ uint64_t gmx_drp : 2; /**< GMX packet drop */
+ uint64_t trace : 1; /**< L2C has the CMB trace buffer */
+ uint64_t rml : 1; /**< RML Interrupt */
+ uint64_t twsi : 1; /**< TWSI Interrupt */
+ uint64_t reserved_44_44 : 1;
+ uint64_t pci_msi : 4; /**< PCI MSI */
+ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */
+ uint64_t uart : 2; /**< Two UART interrupts */
+ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */
+ uint64_t gpio : 16; /**< 16 GPIO interrupts */
+ uint64_t workq : 16; /**< 16 work queue interrupts */
+#else
+ uint64_t workq : 16;
+ uint64_t gpio : 16;
+ uint64_t mbox : 2;
+ uint64_t uart : 2;
+ uint64_t pci_int : 4;
+ uint64_t pci_msi : 4;
+ uint64_t reserved_44_44 : 1;
+ uint64_t twsi : 1;
+ uint64_t rml : 1;
+ uint64_t trace : 1;
+ uint64_t gmx_drp : 2;
+ uint64_t ipd_drp : 1;
+ uint64_t key_zero : 1;
+ uint64_t timer : 4;
+ uint64_t reserved_56_63 : 8;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en4_0_w1s cvmx_ciu_intx_en4_0_w1s_t;
+typedef union cvmx_ciu_intx_en4_0_w1s cvmx_ciu_intx_en4_0_w1s_t;
+
+/**
+ * cvmx_ciu_int#_en4_1
+ *
+ * Notes:
+ * PPx/IP4 will be raised when...
+ * PPx/IP4 = |([CIU_INT_SUM1, CIU_INTx_SUM4] & [CIU_INTx_EN4_1, CIU_INTx_EN4_0])
+ *
+ * Interrupt enable bits for the CIU_INTx_EN4_1 register.  The union overlays
+ * the raw 64-bit register value (u64) with per-chip bitfield layouts: "s" is
+ * the superset layout and each cnXXxx member describes the fields present on
+ * that Octeon model (models not listed alias an equivalent layout).  Each
+ * layout is given twice, once per byte order, so the bit positions match the
+ * hardware on both big- and little-endian builds.
+ *
+ * NOTE(review): this definition is auto-generated from the hardware CSR
+ * description; do not reorder, rename, or resize any field -- the declaration
+ * order of the bitfields defines the register's bit layout.
+ */
+union cvmx_ciu_intx_en4_1
+{
+    uint64_t u64;
+    struct cvmx_ciu_intx_en4_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t rst                          : 1;  /**< MIO RST interrupt enable */
+        uint64_t reserved_57_62               : 6;
+        uint64_t dfm                          : 1;  /**< DFM interrupt enable */
+        uint64_t reserved_53_55               : 3;
+        uint64_t lmc0                         : 1;  /**< LMC0 interrupt enable */
+        uint64_t srio1                        : 1;  /**< SRIO1 interrupt enable */
+        uint64_t srio0                        : 1;  /**< SRIO0 interrupt enable */
+        uint64_t pem1                         : 1;  /**< PEM1 interrupt enable */
+        uint64_t pem0                         : 1;  /**< PEM0 interrupt enable */
+        uint64_t ptp                          : 1;  /**< PTP interrupt enable */
+        uint64_t agl                          : 1;  /**< AGL interrupt enable */
+        uint64_t reserved_37_45               : 9;
+        uint64_t agx0                         : 1;  /**< GMX0 interrupt enable */
+        uint64_t dpi                          : 1;  /**< DPI interrupt enable */
+        uint64_t sli                          : 1;  /**< SLI interrupt enable */
+        uint64_t usb                          : 1;  /**< USB UCTL0 interrupt enable */
+        uint64_t dfa                          : 1;  /**< DFA interrupt enable */
+        uint64_t key                          : 1;  /**< KEY interrupt enable */
+        uint64_t rad                          : 1;  /**< RAD interrupt enable */
+        uint64_t tim                          : 1;  /**< TIM interrupt enable */
+        uint64_t zip                          : 1;  /**< ZIP interrupt enable */
+        uint64_t pko                          : 1;  /**< PKO interrupt enable */
+        uint64_t pip                          : 1;  /**< PIP interrupt enable */
+        uint64_t ipd                          : 1;  /**< IPD interrupt enable */
+        uint64_t l2c                          : 1;  /**< L2C interrupt enable */
+        uint64_t pow                          : 1;  /**< POW err interrupt enable */
+        uint64_t fpa                          : 1;  /**< FPA interrupt enable */
+        uint64_t iob                          : 1;  /**< IOB interrupt enable */
+        uint64_t mio                          : 1;  /**< MIO boot interrupt enable */
+        uint64_t nand                         : 1;  /**< NAND Flash Controller interrupt enable */
+        uint64_t mii1                         : 1;  /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+        uint64_t usb1                         : 1;  /**< Second USB Interrupt */
+        uint64_t uart2                        : 1;  /**< Third UART interrupt */
+        uint64_t wdog                         : 16; /**< Watchdog summary interrupt enable vector */
+#else /* little-endian: identical fields, mirrored declaration order */
+        uint64_t wdog                         : 16;
+        uint64_t uart2                        : 1;
+        uint64_t usb1                         : 1;
+        uint64_t mii1                         : 1;
+        uint64_t nand                         : 1;
+        uint64_t mio                          : 1;
+        uint64_t iob                          : 1;
+        uint64_t fpa                          : 1;
+        uint64_t pow                          : 1;
+        uint64_t l2c                          : 1;
+        uint64_t ipd                          : 1;
+        uint64_t pip                          : 1;
+        uint64_t pko                          : 1;
+        uint64_t zip                          : 1;
+        uint64_t tim                          : 1;
+        uint64_t rad                          : 1;
+        uint64_t key                          : 1;
+        uint64_t dfa                          : 1;
+        uint64_t usb                          : 1;
+        uint64_t sli                          : 1;
+        uint64_t dpi                          : 1;
+        uint64_t agx0                         : 1;
+        uint64_t reserved_37_45               : 9;
+        uint64_t agl                          : 1;
+        uint64_t ptp                          : 1;
+        uint64_t pem0                         : 1;
+        uint64_t pem1                         : 1;
+        uint64_t srio0                        : 1;
+        uint64_t srio1                        : 1;
+        uint64_t lmc0                         : 1;
+        uint64_t reserved_53_55               : 3;
+        uint64_t dfm                          : 1;
+        uint64_t reserved_57_62               : 6;
+        uint64_t rst                          : 1;
+#endif
+    } s;
+    struct cvmx_ciu_intx_en4_1_cn50xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t reserved_2_63                : 62;
+        uint64_t wdog                         : 2;  /**< Watchdog summary interrupt enable vector */
+#else
+        uint64_t wdog                         : 2;
+        uint64_t reserved_2_63                : 62;
+#endif
+    } cn50xx;
+    struct cvmx_ciu_intx_en4_1_cn52xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t reserved_20_63               : 44;
+        uint64_t nand                         : 1;  /**< NAND Flash Controller */
+        uint64_t mii1                         : 1;  /**< Second MII Interrupt */
+        uint64_t usb1                         : 1;  /**< Second USB Interrupt */
+        uint64_t uart2                        : 1;  /**< Third UART interrupt */
+        uint64_t reserved_4_15                : 12;
+        uint64_t wdog                         : 4;  /**< Watchdog summary interrupt enable vector */
+#else
+        uint64_t wdog                         : 4;
+        uint64_t reserved_4_15                : 12;
+        uint64_t uart2                        : 1;
+        uint64_t usb1                         : 1;
+        uint64_t mii1                         : 1;
+        uint64_t nand                         : 1;
+        uint64_t reserved_20_63               : 44;
+#endif
+    } cn52xx;
+    struct cvmx_ciu_intx_en4_1_cn52xxp1
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t reserved_19_63               : 45;
+        uint64_t mii1                         : 1;  /**< Second MII Interrupt */
+        uint64_t usb1                         : 1;  /**< Second USB Interrupt */
+        uint64_t uart2                        : 1;  /**< Third UART interrupt */
+        uint64_t reserved_4_15                : 12;
+        uint64_t wdog                         : 4;  /**< Watchdog summary interrupt enable vector */
+#else
+        uint64_t wdog                         : 4;
+        uint64_t reserved_4_15                : 12;
+        uint64_t uart2                        : 1;
+        uint64_t usb1                         : 1;
+        uint64_t mii1                         : 1;
+        uint64_t reserved_19_63               : 45;
+#endif
+    } cn52xxp1;
+    struct cvmx_ciu_intx_en4_1_cn56xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t reserved_12_63               : 52;
+        uint64_t wdog                         : 12; /**< Watchdog summary interrupt enable vector */
+#else
+        uint64_t wdog                         : 12;
+        uint64_t reserved_12_63               : 52;
+#endif
+    } cn56xx;
+    struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
+    struct cvmx_ciu_intx_en4_1_cn58xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t reserved_16_63               : 48;
+        uint64_t wdog                         : 16; /**< Watchdog summary interrupt enable vector */
+#else
+        uint64_t wdog                         : 16;
+        uint64_t reserved_16_63               : 48;
+#endif
+    } cn58xx;
+    struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+    struct cvmx_ciu_intx_en4_1_cn63xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN
+        uint64_t rst                          : 1;  /**< MIO RST interrupt enable */
+        uint64_t reserved_57_62               : 6;
+        uint64_t dfm                          : 1;  /**< DFM interrupt enable */
+        uint64_t reserved_53_55               : 3;
+        uint64_t lmc0                         : 1;  /**< LMC0 interrupt enable */
+        uint64_t srio1                        : 1;  /**< SRIO1 interrupt enable */
+        uint64_t srio0                        : 1;  /**< SRIO0 interrupt enable */
+        uint64_t pem1                         : 1;  /**< PEM1 interrupt enable */
+        uint64_t pem0                         : 1;  /**< PEM0 interrupt enable */
+        uint64_t ptp                          : 1;  /**< PTP interrupt enable */
+        uint64_t agl                          : 1;  /**< AGL interrupt enable */
+        uint64_t reserved_37_45               : 9;
+        uint64_t agx0                         : 1;  /**< GMX0 interrupt enable */
+        uint64_t dpi                          : 1;  /**< DPI interrupt enable */
+        uint64_t sli                          : 1;  /**< SLI interrupt enable */
+        uint64_t usb                          : 1;  /**< USB UCTL0 interrupt enable */
+        uint64_t dfa                          : 1;  /**< DFA interrupt enable */
+        uint64_t key                          : 1;  /**< KEY interrupt enable */
+        uint64_t rad                          : 1;  /**< RAD interrupt enable */
+        uint64_t tim                          : 1;  /**< TIM interrupt enable */
+        uint64_t zip                          : 1;  /**< ZIP interrupt enable */
+        uint64_t pko                          : 1;  /**< PKO interrupt enable */
+        uint64_t pip                          : 1;  /**< PIP interrupt enable */
+        uint64_t ipd                          : 1;  /**< IPD interrupt enable */
+        uint64_t l2c                          : 1;  /**< L2C interrupt enable */
+        uint64_t pow                          : 1;  /**< POW err interrupt enable */
+        uint64_t fpa                          : 1;  /**< FPA interrupt enable */
+        uint64_t iob                          : 1;  /**< IOB interrupt enable */
+        uint64_t mio                          : 1;  /**< MIO boot interrupt enable */
+        uint64_t nand                         : 1;  /**< NAND Flash Controller interrupt enable */
+        uint64_t mii1                         : 1;  /**< RGMII/MII/MIX Interface 1 Interrupt enable */
+        uint64_t reserved_6_17                : 12;
+        uint64_t wdog                         : 6;  /**< Watchdog summary interrupt enable vector */
+#else /* little-endian: identical fields, mirrored declaration order */
+        uint64_t wdog                         : 6;
+        uint64_t reserved_6_17                : 12;
+        uint64_t mii1                         : 1;
+        uint64_t nand                         : 1;
+        uint64_t mio                          : 1;
+        uint64_t iob                          : 1;
+        uint64_t fpa                          : 1;
+        uint64_t pow                          : 1;
+        uint64_t l2c                          : 1;
+        uint64_t ipd                          : 1;
+        uint64_t pip                          : 1;
+        uint64_t pko                          : 1;
+        uint64_t zip                          : 1;
+        uint64_t tim                          : 1;
+        uint64_t rad                          : 1;
+        uint64_t key                          : 1;
+        uint64_t dfa                          : 1;
+        uint64_t usb                          : 1;
+        uint64_t sli                          : 1;
+        uint64_t dpi                          : 1;
+        uint64_t agx0                         : 1;
+        uint64_t reserved_37_45               : 9;
+        uint64_t agl                          : 1;
+        uint64_t ptp                          : 1;
+        uint64_t pem0                         : 1;
+        uint64_t pem1                         : 1;
+        uint64_t srio0                        : 1;
+        uint64_t srio1                        : 1;
+        uint64_t lmc0                         : 1;
+        uint64_t reserved_53_55               : 3;
+        uint64_t dfm                          : 1;
+        uint64_t reserved_57_62               : 6;
+        uint64_t rst                          : 1;
+#endif
+    } cn63xx;
+    struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
+};
+typedef union cvmx_ciu_intx_en4_1 cvmx_ciu_intx_en4_1_t;
+
+/**
+ * cvmx_ciu_int#_en4_1_w1c
+ *
+ * Notes:
+ * Write-1-to-clear version of the CIU_INTx_EN4_1 register
+ *
+ */
+union cvmx_ciu_intx_en4_1_w1c
+{
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL interrupt enable */
+ uint64_t reserved_37_45 : 9;
+ uint64_t agx0 : 1; /**< Write 1 to clear GMX0 interrupt enable */
+ uint64_t dpi : 1; /**< Write 1 to clear DPI interrupt enable */
+ uint64_t sli : 1; /**< Write 1 to clear SLI interrupt enable */
+ uint64_t usb : 1; /**< Write 1 to clear USB UCTL0 interrupt enable */
+ uint64_t dfa : 1; /**< Write 1 to clear DFA interrupt enable */
+ uint64_t key : 1; /**< Write 1 to clear KEY interrupt enable */
+ uint64_t rad : 1; /**< Write 1 to clear RAD interrupt enable */
+ uint64_t tim : 1; /**< Write 1 to clear TIM interrupt enable */
+ uint64_t zip : 1; /**< Write 1 to clear ZIP interrupt enable */
+ uint64_t pko : 1; /**< Write 1 to clear PKO interrupt enable */
+ uint64_t pip : 1; /**< Write 1 to clear PIP interrupt enable */
+ uint64_t ipd : 1; /**< Write 1 to clear IPD interrupt enable */
+ uint64_t l2c : 1; /**< Write 1 to clear L2C interrupt enable */
+ uint64_t pow : 1; /**< Write 1 to clear POW err interrupt enable */
+ uint64_t fpa : 1; /**< Write 1 to clear FPA interrupt enable */
+ uint64_t iob : 1; /**< Write 1 to clear IOB interrupt enable */
+ uint64_t mio : 1; /**< Write 1 to clear MIO boot interrupt enable */
+ uint64_t nand : 1; /**< Write 1 to clear NAND Flash Controller interrupt
+ enable */
+ uint64_t mii1 : 1; /**< Write 1 to clear RGMII/MII/MIX Interface 1
+ Interrupt enable */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t wdog : 16; /**< Write 1s to clear Watchdog summary interrupt enable */
+#else
+ uint64_t wdog : 16;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t mio : 1;
+ uint64_t iob : 1;
+ uint64_t fpa : 1;
+ uint64_t pow : 1;
+ uint64_t l2c : 1;
+ uint64_t ipd : 1;
+ uint64_t pip : 1;
+ uint64_t pko : 1;
+ uint64_t zip : 1;
+ uint64_t tim : 1;
+ uint64_t rad : 1;
+ uint64_t key : 1;
+ uint64_t dfa : 1;
+ uint64_t usb : 1;
+ uint64_t sli : 1;
+ uint64_t dpi : 1;
+ uint64_t agx0 : 1;
+ uint64_t reserved_37_45 : 9;
+ uint64_t agl : 1;
+ uint64_t ptp : 1;
+ uint64_t pem0 : 1;
+ uint64_t pem1 : 1;
+ uint64_t srio0 : 1;
+ uint64_t srio1 : 1;
+ uint64_t lmc0 : 1;
+ uint64_t reserved_53_55 : 3;
+ uint64_t dfm : 1;
+ uint64_t reserved_57_62 : 6;
+ uint64_t rst : 1;
+#endif
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1c_cn52xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_20_63 : 44;
+ uint64_t nand : 1; /**< NAND Flash Controller */
+ uint64_t mii1 : 1; /**< Second MII Interrupt */
+ uint64_t usb1 : 1; /**< Second USB Interrupt */
+ uint64_t uart2 : 1; /**< Third UART interrupt */
+ uint64_t reserved_4_15 : 12;
+ uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */
+#else
+ uint64_t wdog : 4;
+ uint64_t reserved_4_15 : 12;
+ uint64_t uart2 : 1;
+ uint64_t usb1 : 1;
+ uint64_t mii1 : 1;
+ uint64_t nand : 1;
+ uint64_t reserved_20_63 : 44;
+#endif
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn56xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_12_63 : 52;
+ uint64_t wdog : 12; /**< Watchdog summary interrupt enable vectory */
+#else
+ uint64_t wdog : 12;
+ uint64_t reserved_12_63 : 52;
+#endif
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn58xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t reserved_16_63 : 48;
+ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vectory */
+#else
+ uint64_t wdog : 16;
+ uint64_t reserved_16_63 : 48;
+#endif
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t rst : 1; /**< Write 1 to clear MIO RST interrupt enable */
+ uint64_t reserved_57_62 : 6;
+ uint64_t dfm : 1; /**< Write 1 to clear DFM interrupt enable */
+ uint64_t reserved_53_55 : 3;
+ uint64_t lmc0 : 1; /**< Write 1 to clear LMC0 interrupt enable */
+ uint64_t srio1 : 1; /**< Write 1 to clear SRIO1 interrupt enable */
+ uint64_t srio0 : 1; /**< Write 1 to clear SRIO0 interrupt enable */
+ uint64_t pem1 : 1; /**< Write 1 to clear PEM1 interrupt enable */
+ uint64_t pem0 : 1; /**< Write 1 to clear PEM0 interrupt enable */
+ uint64_t ptp : 1; /**< Write 1 to clear PTP interrupt enable */
+ uint64_t agl : 1; /**< Write 1 to clear AGL i