aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJuli Mallett <jmallett@FreeBSD.org>2010-07-20 07:11:19 +0000
committerJuli Mallett <jmallett@FreeBSD.org>2010-07-20 07:11:19 +0000
commit1c305b501145f696d3597fb9b5b2091caaa6f67c (patch)
tree776ea14a76df76cd5ee4d9b63107c1e819c68914
downloadsrc-1c305b501145f696d3597fb9b5b2091caaa6f67c.tar.gz
src-1c305b501145f696d3597fb9b5b2091caaa6f67c.zip
Initial import of Cavium Networks Octeon Simple Executive, SDK version 1.9.0. (tag: vendor/octeon-sdk/1.9.0)
Notes
Notes: svn path=/vendor-sys/octeon-sdk/dist/; revision=210284 svn path=/vendor-sys/octeon-sdk/1.9.0/; revision=210285; tag=vendor/octeon-sdk/1.9.0
-rw-r--r--README.txt43
-rw-r--r--cvmip.h207
-rw-r--r--cvmx-abi.h92
-rw-r--r--cvmx-access-native.h667
-rw-r--r--cvmx-access.h228
-rw-r--r--cvmx-address.h253
-rw-r--r--cvmx-app-init-linux.c457
-rw-r--r--cvmx-app-init.c616
-rw-r--r--cvmx-app-init.h295
-rw-r--r--cvmx-asm.h513
-rw-r--r--cvmx-asx.h66
-rw-r--r--cvmx-atomic.h666
-rw-r--r--cvmx-bootloader.h147
-rw-r--r--cvmx-bootmem.c952
-rw-r--r--cvmx-bootmem.h429
-rw-r--r--cvmx-ciu.h65
-rw-r--r--cvmx-cmd-queue.c309
-rw-r--r--cvmx-cmd-queue.h604
-rw-r--r--cvmx-cn3010-evb-hs5.c214
-rw-r--r--cvmx-cn3010-evb-hs5.h69
-rw-r--r--cvmx-compactflash.c431
-rw-r--r--cvmx-compactflash.h76
-rw-r--r--cvmx-core.c147
-rw-r--r--cvmx-core.h166
-rw-r--r--cvmx-coremask.c132
-rw-r--r--cvmx-coremask.h161
-rw-r--r--cvmx-csr-addresses.h15490
-rw-r--r--cvmx-csr-db-support.c216
-rw-r--r--cvmx-csr-db.c74292
-rw-r--r--cvmx-csr-db.h179
-rw-r--r--cvmx-csr-enums.h183
-rw-r--r--cvmx-csr-typedefs.h73991
-rw-r--r--cvmx-csr.h222
-rw-r--r--cvmx-cvmmem.h73
-rw-r--r--cvmx-dfa.c120
-rw-r--r--cvmx-dfa.h800
-rw-r--r--cvmx-dma-engine.c464
-rw-r--r--cvmx-dma-engine.h332
-rw-r--r--cvmx-ebt3000.c112
-rw-r--r--cvmx-ebt3000.h67
-rw-r--r--cvmx-fau.h636
-rw-r--r--cvmx-flash.c672
-rw-r--r--cvmx-flash.h134
-rw-r--r--cvmx-fpa.c193
-rw-r--r--cvmx-fpa.h300
-rw-r--r--cvmx-gmx.h94
-rw-r--r--cvmx-gpio.h122
-rw-r--r--cvmx-helper-board.c653
-rw-r--r--cvmx-helper-board.h211
-rw-r--r--cvmx-helper-check-defines.h102
-rw-r--r--cvmx-helper-errata.c340
-rw-r--r--cvmx-helper-errata.h105
-rw-r--r--cvmx-helper-fpa.c246
-rw-r--r--cvmx-helper-fpa.h81
-rw-r--r--cvmx-helper-loop.c113
-rw-r--r--cvmx-helper-loop.h80
-rw-r--r--cvmx-helper-npi.c126
-rw-r--r--cvmx-helper-npi.h80
-rw-r--r--cvmx-helper-rgmii.c507
-rw-r--r--cvmx-helper-rgmii.h129
-rw-r--r--cvmx-helper-sgmii.c523
-rw-r--r--cvmx-helper-sgmii.h123
-rw-r--r--cvmx-helper-spi.c219
-rw-r--r--cvmx-helper-spi.h107
-rw-r--r--cvmx-helper-util.c564
-rw-r--r--cvmx-helper-util.h277
-rw-r--r--cvmx-helper-xaui.c334
-rw-r--r--cvmx-helper-xaui.h124
-rw-r--r--cvmx-helper.c835
-rw-r--r--cvmx-helper.h259
-rw-r--r--cvmx-higig.h283
-rw-r--r--cvmx-interrupt-decodes.c3584
-rw-r--r--cvmx-interrupt-handler.S181
-rw-r--r--cvmx-interrupt-rsl.c762
-rw-r--r--cvmx-interrupt.c528
-rw-r--r--cvmx-interrupt.h306
-rw-r--r--cvmx-iob.h66
-rw-r--r--cvmx-ipd.h309
-rw-r--r--cvmx-key.h113
-rw-r--r--cvmx-l2c.c747
-rw-r--r--cvmx-l2c.h364
-rw-r--r--cvmx-llm.c956
-rw-r--r--cvmx-llm.h401
-rw-r--r--cvmx-lmc.h66
-rw-r--r--cvmx-log-arc.S176
-rw-r--r--cvmx-log.c536
-rw-r--r--cvmx-log.h212
-rw-r--r--cvmx-malloc.h219
-rw-r--r--cvmx-malloc/README-malloc12
-rw-r--r--cvmx-malloc/arena.c293
-rw-r--r--cvmx-malloc/malloc.c4106
-rw-r--r--cvmx-malloc/malloc.h213
-rw-r--r--cvmx-malloc/thread-m.h73
-rw-r--r--cvmx-mdio.h559
-rw-r--r--cvmx-mgmt-port.c759
-rw-r--r--cvmx-mgmt-port.h193
-rw-r--r--cvmx-mio.h66
-rw-r--r--cvmx-nand.c1719
-rw-r--r--cvmx-nand.h675
-rw-r--r--cvmx-npi.h147
-rw-r--r--cvmx-packet.h84
-rw-r--r--cvmx-pci.h68
-rw-r--r--cvmx-pcie.c1062
-rw-r--r--cvmx-pcie.h304
-rw-r--r--cvmx-pip.h475
-rw-r--r--cvmx-pko.c466
-rw-r--r--cvmx-pko.h564
-rw-r--r--cvmx-platform.h196
-rw-r--r--cvmx-pow.c481
-rw-r--r--cvmx-pow.h1749
-rw-r--r--cvmx-raid.c132
-rw-r--r--cvmx-raid.h202
-rw-r--r--cvmx-resources.config172
-rw-r--r--cvmx-rng.h162
-rw-r--r--cvmx-rtc.h160
-rw-r--r--cvmx-rwlock.h169
-rw-r--r--cvmx-scratch.h161
-rw-r--r--cvmx-shared-linux-n32.ld279
-rw-r--r--cvmx-shared-linux-o32.ld277
-rw-r--r--cvmx-shared-linux.ld278
-rw-r--r--cvmx-spi.c639
-rw-r--r--cvmx-spi.h265
-rw-r--r--cvmx-spi4000.c520
-rw-r--r--cvmx-spinlock.h430
-rw-r--r--cvmx-swap.h141
-rw-r--r--cvmx-sysinfo.c220
-rw-r--r--cvmx-sysinfo.h166
-rw-r--r--cvmx-thunder.c328
-rw-r--r--cvmx-thunder.h148
-rw-r--r--cvmx-tim.c270
-rw-r--r--cvmx-tim.h333
-rw-r--r--cvmx-tra.c322
-rw-r--r--cvmx-tra.h411
-rw-r--r--cvmx-twsi-raw.c464
-rw-r--r--cvmx-twsi-raw.h331
-rw-r--r--cvmx-twsi.c285
-rw-r--r--cvmx-twsi.h313
-rw-r--r--cvmx-uart.h74
-rw-r--r--cvmx-usb.c3650
-rw-r--r--cvmx-usb.h1129
-rw-r--r--cvmx-utils.h283
-rw-r--r--cvmx-version.h12
-rw-r--r--cvmx-warn.c79
-rw-r--r--cvmx-warn.h72
-rw-r--r--cvmx-wqe.h315
-rw-r--r--cvmx-zip.c127
-rw-r--r--cvmx-zip.h247
-rw-r--r--cvmx-zone.c169
-rw-r--r--cvmx.h90
-rw-r--r--cvmx.mk144
-rw-r--r--executive-config.h.template180
-rw-r--r--octeon-feature.h140
-rw-r--r--octeon-model.c393
-rw-r--r--octeon-model.h307
-rw-r--r--octeon-pci-console.c480
-rw-r--r--octeon-pci-console.h138
-rw-r--r--perfzilla_screen.pngbin0 -> 27110 bytes
157 files changed, 222515 insertions, 0 deletions
diff --git a/README.txt b/README.txt
new file mode 100644
index 000000000000..553c46df946a
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,43 @@
+Readme for the Octeon Executive Library
+
+
+The Octeon Executive Library provides runtime support and hardware
+abstraction for the Octeon processor. The executive is composed of the
+libcvmx.a library as well as header files that provide
+functionality with inline functions.
+
+
+Usage:
+
+The libcvmx.a library is built for every application as part of the
+application build. (Please refer to the 'related pages' section of the
+HTML documentation for more information on the build system.)
+Applications using the executive should include the header files from
+$OCTEON_ROOT/target/include and link against the library that is built in
+the local obj directory. Each file using the executive
+should include the following two header files in order:
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+
+The cvmx-config.h file contains configuration information for the
+executive and is generated by the cvmx-config script from an
+'executive-config.h' file. A sample version of this file is provided
+in the executive directory as 'executive-config.h.template'.
+
+Copy this file to 'executive-config.h' in the 'config' subdirectory
+of the application directory and customize it as required by the application.
+Applications that don't use any simple executive functionality can omit
+the cvmx-config.h header file. Please refer to the examples for a
+demonstration of where to put the executive-config.h file and for an
+example of generated cvmx-config.h.
+
+For file specific information please see the documentation within the
+source files or the HTML documentation provided in docs/html/index.html.
+The HTML documentation is automatically generated by Doxygen from the
+source files.
+
+
+
+==========================================================================
+Please see the release notes for version specific information.
diff --git a/cvmip.h b/cvmip.h
new file mode 100644
index 000000000000..a8ac16d5bdaa
--- /dev/null
+++ b/cvmip.h
@@ -0,0 +1,207 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Cavium Networks Internet Protocol (IP)
+ *
+ * Definitions for the Internet Protocol (IP) support.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#ifndef __CVMIP_H__
+#define __CVMIP_H__
+
+
+/*
+ * IP protocol values (1 byte)
+ *
+ */
+#define CVMIP_PROTO_ICMP 1 /* Internet Control Message Protocol */
+#define CVMIP_PROTO_TCP 6 /* Transmission Control Protocol */
+#define CVMIP_PROTO_UDP 17 /* User Datagram Protocol */
+#define CVMIP_PROTO_ESP 50 /* Encapsulated Security Payload */
+#define CVMIP_PROTO_AH 51 /* Authentication Header */
+
+
+/**
+ * network packet header definitions
+ * (originally from octane_hw.h)
+ *
+ */
+
+/**
+ * UDP Packet header
+ */
+typedef struct {
+ union {
+ int32_t s32 ;
+ uint32_t u32 ;
+ struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ } s;
+ } prts;
+ uint16_t len ;
+ uint16_t chksum ;
+} cvmip_udp_hdr_t;
+
+/**
+ * TCP Packet header
+ */
+typedef struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ uint32_t seq ;
+ uint32_t ack_seq ;
+ uint32_t hlen :4;
+ uint32_t rsvd :6;
+ uint32_t urg :1;
+ uint32_t ack :1;
+ uint32_t psh :1;
+ uint32_t rst :1;
+ uint32_t syn :1;
+ uint32_t fin :1;
+ uint16_t win_sz ;
+ uint16_t chksum ;
+ uint16_t urg_ptr ;
+ uint32_t junk ;
+} cvmip_tcp_hdr_t;
+
+/**
+ * L4 Packet header
+ */
+typedef union {
+ cvmip_udp_hdr_t udphdr;
+ cvmip_tcp_hdr_t tcphdr;
+ struct {
+ union {
+ int32_t s32 ;
+ uint32_t u32 ;
+ struct {
+ uint16_t src_prt;
+ uint16_t dst_prt;
+ } s;
+ } prts;
+ uint16_t len ;
+ uint16_t chksum ;
+ char dat[48] ; // 48 for IPv6 with no extension hdrs, 64 for IPv4 without options
+ } udp;
+ struct {
+ uint16_t src_prt ;
+ uint16_t dst_prt ;
+ uint32_t seq ;
+ uint32_t ack_seq ;
+ uint32_t hlen :4;
+ uint32_t rsvd :6;
+ uint32_t urg :1;
+ uint32_t ack :1;
+ uint32_t psh :1;
+ uint32_t rst :1;
+ uint32_t syn :1;
+ uint32_t fin :1;
+ uint16_t win_sz ;
+ uint16_t chksum ;
+ uint16_t urg_ptr ;
+ char dat[36] ; // 36 for IPv6 with no extension hdrs, 52 for IPv6 without options
+ } tcp;
+} cvmip_l4_info_t;
+
+/**
+ * Special struct to add a pad to IPv4 header
+ */
+typedef struct {
+ uint32_t pad;
+
+ uint32_t version : 4;
+ uint32_t hl : 4;
+ uint8_t tos ;
+ uint16_t len ;
+
+ uint16_t id ;
+ uint32_t mbz : 1;
+ uint32_t df : 1;
+ uint32_t mf : 1;
+ uint32_t off :13;
+
+ uint8_t ttl ;
+ uint8_t protocol;
+ uint16_t chksum ;
+
+ union {
+ uint64_t u64;
+ struct {
+ uint32_t src;
+ uint32_t dst;
+ } s;
+ } src_dst;
+} cvmip_ipv4_hdr_t;
+
+/**
+ * IPv6 Packet header
+ */
+typedef struct {
+
+ uint32_t version : 4;
+ uint32_t v6class : 8;
+ uint32_t flow :20;
+
+ uint16_t len ; // includes extension headers plus payload (add 40 to be equiv to v4 len field)
+ uint8_t next_hdr; // equivalent to the v4 protocol field
+ uint8_t hop_lim ; // equivalent to the v4 TTL field
+
+ union {
+ uint64_t u64[4];
+ struct {
+ uint64_t src[2];
+ uint64_t dst[2];
+ } s;
+ } src_dst;
+
+} cvmip_ipv6_hdr_t;
+
+
+#endif /* __CVMIP_H__ */
diff --git a/cvmx-abi.h b/cvmx-abi.h
new file mode 100644
index 000000000000..0711558d768f
--- /dev/null
+++ b/cvmx-abi.h
@@ -0,0 +1,92 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file defines macros for use in determining the current calling ABI.
+ *
+ * <hr>$Revision: 41586 $<hr>
+*/
+
+#ifndef __CVMX_ABI_H__
+#define __CVMX_ABI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Check for N32 ABI, defined for 32-bit Simple Exec applications
+ and Linux N32 ABI.*/
+#if (defined _ABIN32 && _MIPS_SIM == _ABIN32)
+#define CVMX_ABI_N32
+/* Check for N64 ABI, defined for 64-bit Linux toolchain. */
+#elif (defined _ABI64 && _MIPS_SIM == _ABI64)
+#define CVMX_ABI_N64
+/* Check for O32 ABI, defined for Linux 032 ABI, not supported yet. */
+#elif (defined _ABIO32 && _MIPS_SIM == _ABIO32)
+#define CVMX_ABI_O32
+/* Check for EABI ABI, defined for 64-bit Simple Exec applications. */
+#else
+#define CVMX_ABI_EABI
+#endif
+
+#ifndef __BYTE_ORDER
+ #if defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #elif !defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #define __BIG_ENDIAN 4321
+ #elif !defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+ #define __BIG_ENDIAN 4321
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #else
+ #error Unable to determine Endian mode
+ #endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ABI_H__ */
diff --git a/cvmx-access-native.h b/cvmx-access-native.h
new file mode 100644
index 000000000000..c16ca9e96a2e
--- /dev/null
+++ b/cvmx-access-native.h
@@ -0,0 +1,667 @@
+/***********************license start***************
+ * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+/**
+ * @file
+ * Functions for accessing memory and CSRs on Octeon when we are compiling
+ * natively.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ACCESS_NATIVE_H__
+#define __CVMX_ACCESS_NATIVE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Returns the Octeon processor ID.
+ *
+ * @return Octeon processor ID from COP0
+ */
+static inline uint32_t cvmx_get_proc_id(void)
+{
+#ifdef CVMX_BUILD_FOR_LINUX_USER
+ extern uint32_t cvmx_app_init_processor_id;
+ return cvmx_app_init_processor_id;
+#else
+ uint32_t id;
+ asm ("mfc0 %0, $15,0" : "=r" (id));
+ return id;
+#endif
+}
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatable
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+ /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
+ ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
+ return(CAST64(ptr) & 0x7FFFFFFF);
+#endif
+
+#ifdef __linux__
+ if (sizeof(void*) == 8)
+ {
+ /* We're running in 64 bit mode. Normally this means that we can use
+ 40 bits of address space (the hardware limit). Unfortunately there
+ is one case were we need to limit this to 30 bits, sign extended
+ 32 bit. Although these are 64 bits wide, only 30 bits can be used */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ }
+ else
+ {
+#ifdef __KERNEL__
+ return (long)(ptr) & 0x1fffffff;
+#else
+ extern uint64_t linux_mem32_offset;
+ if (cvmx_likely(ptr))
+ return CAST64(ptr) - linux_mem32_offset;
+ else
+ return 0;
+#endif
+ }
+#elif defined(_WRS_KERNEL)
+ return (long)(ptr) & 0x7fffffff;
+#elif defined(VXWORKS_USER_MAPPINGS)
+ /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
+ 2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
+ uint64_t address = (long)ptr;
+ if (address & 0x80000000)
+ return address & 0x1fffffff; /* KSEG pointers directly map the lower 256MB and bootbus */
+ else if ((address >= 0x10000000) && (address < 0x20000000))
+ return address + 0x400000000ull; /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
+ else
+ return address; /* Looks to be a 1:1 mapped userspace pointer */
+#else
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+ /* We are assumung we're running the Simple Executive standalone. In this
+ mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
+ addresses are never used. Since we know all this, save the masking
+ cycles and do nothing */
+ return CAST64(ptr);
+#else
+
+ if (sizeof(void*) == 8)
+ {
+ /* We're running in 64 bit mode. Normally this means that we can use
+ 40 bits of address space (the hardware limit). Unfortunately there
+ is one case were we need to limit this to 30 bits, sign extended
+ 32 bit. Although these are 64 bits wide, only 30 bits can be used */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ }
+ else
+ return (long)(ptr) & 0x7fffffff;
+
+#endif
+#endif
+}
+
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");
+
+#ifdef CVMX_BUILD_FOR_UBOOT
+ /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
+ ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
+ if (physical_address >= 0x80000000)
+ return NULL;
+ else
+ return CASTPTR(void, (physical_address & 0x7FFFFFFF));
+#endif
+
+#ifdef __linux__
+ if (sizeof(void*) == 8)
+ {
+ /* Just set the top bit, avoiding any TLB uglyness */
+ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
+ }
+ else
+ {
+#ifdef __KERNEL__
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+#else
+ extern uint64_t linux_mem32_offset;
+ if (cvmx_likely(physical_address))
+ return CASTPTR(void, physical_address + linux_mem32_offset);
+ else
+ return NULL;
+#endif
+ }
+#elif defined(_WRS_KERNEL)
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+#elif defined(VXWORKS_USER_MAPPINGS)
+ /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
+ 2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
+ if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+ else if ((physical_address >= 0x410000000ull) && (physical_address < 0x420000000ull))
+ return CASTPTR(void, physical_address - 0x400000000ull);
+ else
+ return CASTPTR(void, physical_address);
+#else
+
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+ /* We are assumung we're running the Simple Executive standalone. In this
+ mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
+ addresses are never used. Since we know all this, save bit insert
+ cycles and do nothing */
+ return CASTPTR(void, physical_address);
+#else
+ /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
+ if (sizeof(void*) == 8)
+ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
+ else
+ return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
+
+#endif
+
+#endif
+}
+
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_WRITE64. This macro is used to build a store operation to
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
+}
+
+#elif defined(CVMX_ABI_N32)
+
+/* The N32 ABI passes all 64bit quantities in a single register, so it is
+ possible to use the arguments directly. We have to use inline assembly
+ for the actual store since a pointer would truncate the address */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
+}
+
+#elif defined(CVMX_ABI_O32)
+
+#ifdef __KERNEL__
+#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
+#else
+
+/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
+ separate registers. Assembly must be used to put them back together
+ before they're used. What should be a simple store becomes a
+ convoluted mess of shifts and ors */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
+{ \
+ if (sizeof(TYPE##_t) == 8) \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t valh = (uint64_t)val>>32; \
+ uint32_t vall = val; \
+ uint32_t tmp1; \
+ uint32_t tmp2; \
+ uint32_t tmp3; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[tmp1], %[valh], 32\n" \
+ "dsll %[tmp2], %[csrh], 32\n" \
+ "dsll %[tmp3], %[vall], 32\n" \
+ "dsrl %[tmp3], %[tmp3], 32\n" \
+ "or %[tmp1], %[tmp1], %[tmp3]\n" \
+ "dsll %[tmp3], %[csrl], 32\n" \
+ "dsrl %[tmp3], %[tmp3], 32\n" \
+ "or %[tmp2], %[tmp2], %[tmp3]\n" \
+ ST " %[tmp1], 0(%[tmp2])\n" \
+ ".set pop\n" \
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
+ : [valh] "r" (valh), [vall] "r" (vall), \
+ [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ } \
+ else \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t tmp1; \
+ uint32_t tmp2; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[tmp1], %[csrh], 32\n" \
+ "dsll %[tmp2], %[csrl], 32\n" \
+ "dsrl %[tmp2], %[tmp2], 32\n" \
+ "or %[tmp1], %[tmp1], %[tmp2]\n" \
+ ST " %[val], 0(%[tmp1])\n" \
+ ".set pop\n" \
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \
+ : [val] "r" (val), [csrh] "r" (csr_addrh), \
+ [csrl] "r" (csr_addrl) \
+ ); \
+ } \
+}
+
+#endif
+
+#else
+
+/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
+#error: Unsupported ABI
+
+#endif
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_READ64. This macro is used to build a load operation from
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ return *CASTPTR(volatile TYPE##_t, addr); \
+}
+
+#elif defined(CVMX_ABI_N32)
+
+/* The N32 ABI passes all 64bit quantities in a single register, so it is
+ possible to use the arguments directly. We have to use inline assembly
+ for the actual store since a pointer would truncate the address */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ TYPE##_t val; \
+ asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
+ return val; \
+}
+
+#elif defined(CVMX_ABI_O32)
+
+#ifdef __KERNEL__
+#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
+#else
+
+/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
+ separate registers. Assembly must be used to put them back together
+ before they're used. What should be a simple load becomes a
+ convoluted mess of shifts and ors */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr) \
+{ \
+ if (sizeof(TYPE##_t) == 8) \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ uint32_t valh; \
+ uint32_t vall; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[valh], %[csrh], 32\n" \
+ "dsll %[vall], %[csrl], 32\n" \
+ "dsrl %[vall], %[vall], 32\n" \
+ "or %[valh], %[valh], %[vall]\n" \
+ LT " %[vall], 0(%[valh])\n" \
+ "dsrl %[valh], %[vall], 32\n" \
+ "sll %[vall], 0\n" \
+ "sll %[valh], 0\n" \
+ ".set pop\n" \
+ : [valh] "=&r" (valh), [vall] "=&r" (vall) \
+ : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ return ((uint64_t)valh<<32) | vall; \
+ } \
+ else \
+ { \
+ uint32_t csr_addrh = csr_addr>>32; \
+ uint32_t csr_addrl = csr_addr; \
+ TYPE##_t val; \
+ uint32_t tmp; \
+ \
+ asm volatile ( \
+ ".set push\n" \
+ ".set mips64\n" \
+ "dsll %[val], %[csrh], 32\n" \
+ "dsll %[tmp], %[csrl], 32\n" \
+ "dsrl %[tmp], %[tmp], 32\n" \
+ "or %[val], %[val], %[tmp]\n" \
+ LT " %[val], 0(%[val])\n" \
+ ".set pop\n" \
+ : [val] "=&r" (val), [tmp] "=&r" (tmp) \
+ : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
+ ); \
+ return val; \
+ } \
+}
+
+#endif /* __KERNEL__ */
+
+#else
+
+/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
+#error: Unsupported ABI
+
+#endif
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+ takes two arguments, the address and the value to write.
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
+CVMX_BUILD_WRITE64(int64, "sd");
+CVMX_BUILD_WRITE64(int32, "sw");
+CVMX_BUILD_WRITE64(int16, "sh");
+CVMX_BUILD_WRITE64(int8, "sb");
+CVMX_BUILD_WRITE64(uint64, "sd");
+CVMX_BUILD_WRITE64(uint32, "sw");
+CVMX_BUILD_WRITE64(uint16, "sh");
+CVMX_BUILD_WRITE64(uint8, "sb");
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+ takes the address as the only argument
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
+CVMX_BUILD_READ64(int64, "ld");
+CVMX_BUILD_READ64(int32, "lw");
+CVMX_BUILD_READ64(int16, "lh");
+CVMX_BUILD_READ64(int8, "lb");
+CVMX_BUILD_READ64(uint64, "ld");
+CVMX_BUILD_READ64(uint32, "lw");
+CVMX_BUILD_READ64(uint16, "lhu");
+CVMX_BUILD_READ64(uint8, "lbu");
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{
+ cvmx_write64_uint64(csr_addr, val);
+
+ /* Perform an immediate read after every write to an RSL register to force
+ the write to complete. It doesn't matter what RSL read we do, so we
+ choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
+ if ((csr_addr >> 40) == (0x800118))
+ cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
+}
+
+static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
+{
+ cvmx_write64_uint64(io_addr, val);
+}
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ return cvmx_read64_uint64(csr_addr);
+}
+
+static inline void cvmx_send_single(uint64_t data)
+{
+ const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
+ cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
+}
+
+static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
+{
+ union
+ {
+ uint64_t u64;
+ struct {
+ uint64_t scraddr : 8;
+ uint64_t len : 8;
+ uint64_t addr :48;
+ } s;
+ } addr;
+ addr.u64 = csr_addr;
+ addr.s.scraddr = scraddr >> 3;
+ addr.s.len = 1;
+ cvmx_send_single(addr.u64);
+}
+
+
+/**
+ * Number of the Core on which the program is currently running.
+ *
+ * @return Number of cores
+ */
+static inline unsigned int cvmx_get_core_num(void)
+{
+ unsigned int core_num;
+ CVMX_RDHWRNV(core_num, 0);
+ return core_num;
+}
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline uint32_t cvmx_pop(uint32_t val)
+{
+ uint32_t pop;
+ CVMX_POP(pop, val);
+ return pop;
+}
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline int cvmx_dpop(uint64_t val)
+{
+ int pop;
+ CVMX_DPOP(pop, val);
+ return pop;
+}
+
+
+/**
+ * Provide current cycle counter as a return value
+ *
+ * @return current cycle counter
+ */
+static inline uint64_t cvmx_get_cycle(void)
+{
+#if defined(CVMX_ABI_O32)
+ uint32_t tmp_low, tmp_hi;
+
+ asm volatile (
+ " .set push \n"
+ " .set mips64r2 \n"
+ " .set noreorder \n"
+ " rdhwr %[tmpl], $31 \n"
+ " dsrl %[tmph], %[tmpl], 32 \n"
+ " sll %[tmpl], 0 \n"
+ " sll %[tmph], 0 \n"
+ " .set pop \n"
+ : [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );
+
+ return(((uint64_t)tmp_hi << 32) + tmp_low);
+#else
+ uint64_t cycle;
+ CVMX_RDHWR(cycle, 31);
+ return(cycle);
+#endif
+}
+
+
+/**
+ * Reads a chip global cycle counter. This counts CPU cycles since
+ * chip reset. The counter is 64 bit.
+ * This register does not exist on CN38XX pass 1 silicion
+ *
+ * @return Global chip cycle count since chip reset.
+ */
+static inline uint64_t cvmx_get_cycle_global(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
+ return 0;
+ else
+ return cvmx_read64_uint64(CVMX_IPD_CLK_COUNT);
+}
+
+
+/**
+ * Wait for the specified number of cycle
+ *
+ * @param cycles
+ */
+static inline void cvmx_wait(uint64_t cycles)
+{
+ uint64_t done = cvmx_get_cycle() + cycles;
+
+ while (cvmx_get_cycle() < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
+ * Wait for the specified number of micro seconds
+ *
+ * @param usec micro seconds to wait
+ */
+static inline void cvmx_wait_usec(uint64_t usec)
+{
+ uint64_t done = cvmx_get_cycle() + usec * cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
+ while (cvmx_get_cycle() < done)
+ {
+ /* Spin */
+ }
+}
+
+
+/**
+ * Perform a soft reset of Octeon
+ *
+ * @return
+ */
+static inline void cvmx_reset_octeon(void)
+{
+ cvmx_ciu_soft_rst_t ciu_soft_rst;
+ ciu_soft_rst.u64 = 0;
+ ciu_soft_rst.s.soft_rst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
+}
+
+
+/**
+ * Read a byte of fuse data
+ * @param byte_addr address to read
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
+{
+ cvmx_mio_fus_rcmd_t read_cmd;
+
+ read_cmd.u64 = 0;
+ read_cmd.s.addr = byte_addr;
+ read_cmd.s.pend = 1;
+ cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+ while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
+ ;
+ return(read_cmd.s.dat);
+}
+
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read(int fuse)
+{
+ return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ACCESS_NATIVE_H__ */
+
diff --git a/cvmx-access.h b/cvmx-access.h
new file mode 100644
index 000000000000..d0da7caea22a
--- /dev/null
+++ b/cvmx-access.h
@@ -0,0 +1,228 @@
+/***********************license start***************
+ * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+/**
+ * @file
+ * Function prototypes for accessing memory and CSRs on Octeon.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ACCESS_H__
+#define __CVMX_ACCESS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* We're going to assume that if we are compiling for Mips then we must be
+ running natively on Octeon. It is possible that this code could be
+ compiled on a non Octeon Mips that is acting as a PCI/PCIe host. In this
+ case this assumption will be wrong and cause issues. We can't key off of
+ __octeon__ since some people use stock gcc toolchains */
+#if defined(__mips__) && !defined(CVMX_BUILD_FOR_LINUX_HOST)
+ #define CVMX_FUNCTION static inline
+#else
+ #define CVMX_FUNCTION extern
+#endif
+
+/**
+ * simprintf uses simulator tricks to speed up printouts. The format
+ * and args are passed to the simulator and processed natively on the host.
+ * Simprintf is limited to 7 arguments, and they all must use %ll (long long)
+ * format specifiers to be displayed correctly.
+ *
+ * @param format
+ *
+ * @return
+ */
+EXTERN_ASM void simprintf(const char *format, ...);
+
+/**
+ * This function performs some default initialization of the Octeon executive.
+ * It initializes the cvmx_bootmem memory allocator with the list of physical
+ * memory provided by the bootloader, and creates 1-1 TLB mappings for this
+ * memory. This function should be called on all cores that will use either the
+ * bootmem allocator or the 1-1 TLB mappings. Applications which require a
+ * different configuration can replace this function with a suitable application
+ * specific one.
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+extern int cvmx_user_app_init(void);
+
+/**
+ * Returns the Octeon processor ID.
+ *
+ * @return Octeon processor ID from COP0
+ */
+CVMX_FUNCTION uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+CVMX_FUNCTION uint64_t cvmx_ptr_to_phys(void *ptr);
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+CVMX_FUNCTION void *cvmx_phys_to_ptr(uint64_t physical_address);
+
+CVMX_FUNCTION void cvmx_write64_int64(uint64_t address, int64_t value);
+CVMX_FUNCTION void cvmx_write64_uint64(uint64_t address, uint64_t value);
+CVMX_FUNCTION void cvmx_write64_int32(uint64_t address, int32_t value);
+CVMX_FUNCTION void cvmx_write64_uint32(uint64_t address, uint32_t value);
+CVMX_FUNCTION void cvmx_write64_int16(uint64_t address, int16_t value);
+CVMX_FUNCTION void cvmx_write64_uint16(uint64_t address, uint16_t value);
+CVMX_FUNCTION void cvmx_write64_int8(uint64_t address, int8_t value);
+CVMX_FUNCTION void cvmx_write64_uint8(uint64_t address, uint8_t value);
+CVMX_FUNCTION void cvmx_write_csr(uint64_t csr_addr, uint64_t val);
+CVMX_FUNCTION void cvmx_write_io(uint64_t io_addr, uint64_t val);
+
+CVMX_FUNCTION int64_t cvmx_read64_int64(uint64_t address);
+CVMX_FUNCTION uint64_t cvmx_read64_uint64(uint64_t address);
+CVMX_FUNCTION int32_t cvmx_read64_int32(uint64_t address);
+CVMX_FUNCTION uint32_t cvmx_read64_uint32(uint64_t address);
+CVMX_FUNCTION int16_t cvmx_read64_int16(uint64_t address);
+CVMX_FUNCTION uint16_t cvmx_read64_uint16(uint64_t address);
+CVMX_FUNCTION int8_t cvmx_read64_int8(uint64_t address);
+CVMX_FUNCTION uint8_t cvmx_read64_uint8(uint64_t address);
+CVMX_FUNCTION uint64_t cvmx_read_csr(uint64_t csr_addr);
+
+CVMX_FUNCTION void cvmx_send_single(uint64_t data);
+CVMX_FUNCTION void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr);
+
+/**
+ * Number of the Core on which the program is currently running.
+ *
+ * @return The current core's number
+ */
+CVMX_FUNCTION unsigned int cvmx_get_core_num(void);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+CVMX_FUNCTION uint32_t cvmx_pop(uint32_t val);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+CVMX_FUNCTION int cvmx_dpop(uint64_t val);
+
+/**
+ * Provide current cycle counter as a return value
+ *
+ * @return current cycle counter
+ */
+CVMX_FUNCTION uint64_t cvmx_get_cycle(void);
+
+/**
+ * Reads a chip global cycle counter. This counts CPU cycles since
+ * chip reset. The counter is 64 bit.
+ * This register does not exist on CN38XX pass 1 silicon
+ *
+ * @return Global chip cycle count since chip reset.
+ */
+CVMX_FUNCTION uint64_t cvmx_get_cycle_global(void);
+
+/**
+ * Wait for the specified number of cycles
+ *
+ * @param cycles
+ */
+CVMX_FUNCTION void cvmx_wait(uint64_t cycles);
+
+/**
+ * Wait for the specified number of micro seconds
+ *
+ * @param usec micro seconds to wait
+ */
+CVMX_FUNCTION void cvmx_wait_usec(uint64_t usec);
+
+/**
+ * Perform a soft reset of Octeon
+ *
+ * @return
+ */
+CVMX_FUNCTION void cvmx_reset_octeon(void);
+
+/**
+ * Read a byte of fuse data
+ * @param byte_addr address to read
+ *
+ * @return byte of fuse data
+ */
+CVMX_FUNCTION uint8_t cvmx_fuse_read_byte(int byte_addr);
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+CVMX_FUNCTION int cvmx_fuse_read(int fuse);
+
+#undef CVMX_FUNCTION
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ACCESS_H__ */
+
diff --git a/cvmx-address.h b/cvmx-address.h
new file mode 100644
index 000000000000..096a68ab958f
--- /dev/null
+++ b/cvmx-address.h
@@ -0,0 +1,253 @@
+/***********************license start***************
+ * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+/**
+ * @file
+ * Typedefs and defines for working with Octeon physical addresses.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+#ifndef __CVMX_ADDRESS_H__
+#define __CVMX_ADDRESS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+} cvmx_mips_space_t;
+
+typedef enum {
+ CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
+ CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
+} cvmx_mips_xkseg_space_t;
+
+// decodes <14:13> of a kseg3 window address
+typedef enum {
+ CVMX_ADD_WIN_SCR = 0L,
+ CVMX_ADD_WIN_DMA = 1L, // see cvmx_add_win_dma_dec_t for further decode
+ CVMX_ADD_WIN_UNUSED = 2L,
+ CVMX_ADD_WIN_UNUSED2 = 3L
+} cvmx_add_win_dec_t;
+
+// decode within DMA space
+typedef enum {
+ CVMX_ADD_WIN_DMA_ADD = 0L, // add store data to the write buffer entry, allocating it if necessary
+ CVMX_ADD_WIN_DMA_SENDMEM = 1L, // send out the write buffer entry to DRAM
+ // store data must be normal DRAM memory space address in this case
+ CVMX_ADD_WIN_DMA_SENDDMA = 2L, // send out the write buffer entry as an IOBDMA command
+ // see CVMX_ADD_WIN_DMA_SEND_DEC for data contents
+ CVMX_ADD_WIN_DMA_SENDIO = 3L, // send out the write buffer entry as an IO write
+ // store data must be normal IO space address in this case
+ CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, // send out a single-tick command on the NCB bus
+ // no write buffer data needed/used
+} cvmx_add_win_dma_dec_t;
+
+
+
+/**
+ * Physical Address Decode
+ *
+ * Octeon-I HW never interprets this X (<39:36> reserved
+ * for future expansion), software should set to 0.
+ *
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0FFF FFFF
+ *
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ *
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXXF FFFF FFFF
+ *
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00XF FFFF FFFF
+ *
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
+ *
+ * Decode of all Octeon addresses
+ */
+typedef union {
+
+ uint64_t u64;
+
+ struct {
+ cvmx_mips_space_t R : 2;
+ uint64_t offset :62;
+ } sva; // mapped or unmapped virtual address
+
+ struct {
+ uint64_t zeroes :33;
+ uint64_t offset :31;
+ } suseg; // mapped USEG virtual addresses (typically)
+
+ struct {
+ uint64_t ones :33;
+ cvmx_mips_xkseg_space_t sp : 2;
+ uint64_t offset :29;
+ } sxkseg; // mapped or unmapped virtual address
+
+ struct {
+ cvmx_mips_space_t R : 2; // CVMX_MIPS_SPACE_XKPHYS in this case
+ uint64_t cca : 3; // ignored by octeon
+ uint64_t mbz :10;
+ uint64_t pa :49; // physical address
+ } sxkphys; // physical address accessed through xkphys unmapped virtual address
+
+ struct {
+ uint64_t mbz :15;
+ uint64_t is_io : 1; // if set, the address is uncached and resides on MCB bus
+ uint64_t did : 8; // the hardware ignores this field when is_io==0, else device ID
+ uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t offset :36;
+ } sphys; // physical address
+
+ struct {
+ uint64_t zeroes :24; // technically, <47:40> are don't-cares
+ uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t offset :36;
+ } smem; // physical mem address
+
+ struct {
+ uint64_t mem_region :2;
+ uint64_t mbz :13;
+ uint64_t is_io : 1; // 1 in this case
+ uint64_t did : 8; // the hardware ignores this field when is_io==0, else device ID
+ uint64_t unaddr: 4; // the hardware ignores <39:36> in Octeon I
+ uint64_t offset :36;
+ } sio; // physical IO address
+
+ struct {
+ uint64_t ones : 49;
+ cvmx_add_win_dec_t csrdec : 2; // CVMX_ADD_WIN_SCR (0) in this case
+ uint64_t addr : 13;
+ } sscr; // scratchpad virtual address - accessed through a window at the end of kseg3
+
+ // there should only be stores to IOBDMA space, no loads
+ struct {
+ uint64_t ones : 49;
+ cvmx_add_win_dec_t csrdec : 2; // CVMX_ADD_WIN_DMA (1) in this case
+ uint64_t unused2: 3;
+ cvmx_add_win_dma_dec_t type : 3;
+ uint64_t addr : 7;
+ } sdma; // IOBDMA virtual address - accessed through a window at the end of kseg3
+
+ struct {
+ uint64_t didspace : 24;
+ uint64_t unused : 40;
+ } sfilldidspace;
+
+} cvmx_addr_t;
+
+/* These macros for used by 32 bit applications */
+
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) (((int32_t)segment << 31) | (int32_t)(add))
+
+/* Currently all IOs are performed using XKPHYS addressing. Linux uses the
+ CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.
+ Future OSes may need to change the upper bits of IO addresses. The
+ following define controls the upper two bits for all IO addresses generated
+ by the simple executive library */
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
+#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
+#define CVMX_FULL_DID(did,subdid) (((did) << 3) | (subdid))
+
+
+// from include/ncb_rsl_id.v
+#define CVMX_OCT_DID_MIS 0ULL // misc stuff
+#define CVMX_OCT_DID_GMX0 1ULL
+#define CVMX_OCT_DID_GMX1 2ULL
+#define CVMX_OCT_DID_PCI 3ULL
+#define CVMX_OCT_DID_KEY 4ULL
+#define CVMX_OCT_DID_FPA 5ULL
+#define CVMX_OCT_DID_DFA 6ULL
+#define CVMX_OCT_DID_ZIP 7ULL
+#define CVMX_OCT_DID_RNG 8ULL
+#define CVMX_OCT_DID_IPD 9ULL
+#define CVMX_OCT_DID_PKT 10ULL
+#define CVMX_OCT_DID_TIM 11ULL
+#define CVMX_OCT_DID_TAG 12ULL
+// the rest are not on the IO bus
+#define CVMX_OCT_DID_L2C 16ULL
+#define CVMX_OCT_DID_LMC 17ULL
+#define CVMX_OCT_DID_SPX0 18ULL
+#define CVMX_OCT_DID_SPX1 19ULL
+#define CVMX_OCT_DID_PIP 20ULL
+#define CVMX_OCT_DID_ASX0 22ULL
+#define CVMX_OCT_DID_ASX1 23ULL
+#define CVMX_OCT_DID_IOB 30ULL
+
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT,2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG,0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG,1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG,2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG,3ULL)
+#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG,4ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG,7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB,0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM,0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY,0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI,6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS,0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI,0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD,7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA,7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS,7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP,0ULL)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ADDRESS_H__ */
+
diff --git a/cvmx-app-init-linux.c b/cvmx-app-init-linux.c
new file mode 100644
index 000000000000..ed83b50dbe78
--- /dev/null
+++ b/cvmx-app-init-linux.c
@@ -0,0 +1,457 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+/**
+ * @file
+ * Simple executive application initialization for Linux user space. This
+ * file should be used instead of cvmx-app-init.c for running simple executive
+ * applications under Linux in userspace. The following are some of the key
+ * points to remember when writing applications to run both under the
+ * standalone simple executive and userspace under Linux.
+ *
+ * -# Application main must be called "appmain" under Linux. Use an ifdef
+ * based on __linux__ to determine the proper name.
+ * -# Be careful to use cvmx_ptr_to_phys() and cvmx_phys_to_ptr. The simple
+ * executive 1-1 TLB mappings allow you to be sloppy and interchange
+ * hardware addresses with virtual address. This isn't true under Linux.
+ * -# If you're talking directly to hardware, be careful. The normal Linux
+ * protections are circumvented. If you do something bad, Linux won't
+ * save you.
+ * -# Most hardware can only be initialized once. Unless you're very careful,
+ * this also means your Linux application can only run once.
+ *
+ * <hr>$Revision: 41757 $<hr>
+ *
+ */
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <signal.h>
+#include <sys/statfs.h>
+#include <sys/wait.h>
+#include <sys/sysmips.h>
+#include <sched.h>
+#include <octeon-app-init.h>
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-atomic.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-coremask.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-bootmem.h"
+
+int octeon_model_version_check(uint32_t chip_id);
+
+#define OCTEON_ECLOCK_MULT_INPUT_X16 ((int)(33.4*16))
+
+/* Applications using the simple executive libraries under Linux userspace must
+ rename their "main" function to match the prototype below. This allows the
+ simple executive to perform needed memory initialization and process
+ creation before the application runs. */
+extern int appmain(int argc, const char *argv[]);
+
+/* These two external addresses provide the beginning and end markers for the
+ CVMX_SHARED section. These are defined by the cvmx-shared.ld linker script.
+ If they aren't defined, you probably forgot to link using this script. */
+extern void __cvmx_shared_start;
+extern void __cvmx_shared_end;
+extern uint64_t linux_mem32_min;
+extern uint64_t linux_mem32_max;
+extern uint64_t linux_mem32_wired;
+extern uint64_t linux_mem32_offset;
+
+#define MIPS_CAVIUM_XKPHYS_READ 2010 /* XKPHYS */
+#define MIPS_CAVIUM_XKPHYS_WRITE 2011 /* XKPHYS */
+
+static CVMX_SHARED int32_t warn_count;
+
+/**
+ * This function performs some default initialization of the Octeon executive. It initializes
+ * the cvmx_bootmem memory allocator with the list of physical memory shared by the bootloader.
+ * This function should be called on all cores that will use the bootmem allocator.
+ * Applications which require a different configuration can replace this function with a suitable application
+ * specific one.
+ *
+ * @return 0 on success
+ * -1 on failure
+ */
+int cvmx_user_app_init(void)
+{
+ return 0;
+}
+
+
+/**
+ * Simulator magic is not supported in user mode under Linux.
+ * This version of simprintf simply calls the underlying C
+ * library printf for output. It also makes sure that two
+ * calls to simprintf provide atomic output.
+ *
+ * @param fmt Format string in the same format as printf.
+ */
+void simprintf(const char *fmt, ...)
+{
+ CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+ va_list ap;
+
+ cvmx_spinlock_lock(&simprintf_lock);
+ printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num());
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ cvmx_spinlock_unlock(&simprintf_lock);
+}
+
+
+/**
+ * Setup the CVMX_SHARED data section to be shared across
+ * all processors running this application. A memory mapped
+ * region is allocated using shm_open and mmap. The current
+ * contents of the CVMX_SHARED section are copied into the
+ * region. Then the new region is remapped to replace the
+ * existing CVMX_SHARED data.
+ *
+ * This function will display a message and abort the
+ * application under any error conditions. The Linux tmpfs
+ * filesystem must be mounted under /dev/shm.
+ */
+static void setup_cvmx_shared(void)
+{
+ const char *SHM_NAME = "cvmx_shared";
+ unsigned long shared_size = &__cvmx_shared_end - &__cvmx_shared_start;
+ int fd;
+
+ /* If there isn't any shared data we can skip all this */
+ if (shared_size)
+ {
+ char shm_name[30];
+ printf("CVMX_SHARED: %p-%p\n", &__cvmx_shared_start, &__cvmx_shared_end);
+
+#ifdef __UCLIBC__
+ const char *defaultdir = "/dev/shm/";
+ struct statfs f;
+ int pid;
+ /* The canonical place is /dev/shm. */
+ if (statfs (defaultdir, &f) == 0)
+ {
+ pid = getpid();
+ sprintf (shm_name, "%s%s-%d", defaultdir, SHM_NAME, pid);
+ }
+ else
+ {
+ perror("/dev/shm is not mounted");
+ exit(-1);
+ }
+
+ /* shm_open(), shm_unlink() are not implemented in uClibc. Do the
+ same thing using open() and close() system calls. */
+ fd = open (shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
+
+ if (fd < 0)
+ {
+ perror("Failed to open CVMX_SHARED(shm_name)");
+ exit(errno);
+ }
+
+ unlink (shm_name);
+#else
+ sprintf(shm_name, "%s-%d", SHM_NAME, getpid());
+ /* Open a new shared memory region for use as CVMX_SHARED */
+ fd = shm_open(shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
+ if (fd <0)
+ {
+ perror("Failed to setup CVMX_SHARED(shm_open)");
+ exit(errno);
+ }
+
+ /* We don't want the file on the filesystem. Immediately unlink it so
+ another application can create its own shared region */
+ shm_unlink(shm_name);
+#endif
+
+ /* Resize the region to match the size of CVMX_SHARED */
+ ftruncate(fd, shared_size);
+
+ /* Map the region into some random location temporarily so we can
+ copy the shared data to it */
+ void *ptr = mmap(NULL, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (ptr == NULL)
+ {
+ perror("Failed to setup CVMX_SHARED(mmap copy)");
+ exit(errno);
+ }
+
+ /* Copy CVMX_SHARED to the new shared region so we don't lose
+ initializers */
+ memcpy(ptr, &__cvmx_shared_start, shared_size);
+ munmap(ptr, shared_size);
+
+ /* Remap the shared region to replace the old CVMX_SHARED region */
+ ptr = mmap(&__cvmx_shared_start, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (ptr == NULL)
+ {
+ perror("Failed to setup CVMX_SHARED(mmap final)");
+ exit(errno);
+ }
+
+ /* Once mappings are setup, the file handle isn't needed anymore */
+ close(fd);
+ }
+}
+
+
+/**
+ * Shutdown and free the shared CVMX_SHARED region setup by
+ * setup_cvmx_shared.
+ */
+static void shutdown_cvmx_shared(void)
+{
+ unsigned long shared_size = &__cvmx_shared_end - &__cvmx_shared_start;
+ if (shared_size)
+ munmap(&__cvmx_shared_start, shared_size);
+}
+
+
+/**
+ * Setup access to the CONFIG_CAVIUM_RESERVE32 memory section
+ * created by the kernel. This memory is used for shared
+ * hardware buffers with 32 bit userspace applications.
+ */
+static void setup_reserve32(void)
+{
+ if (linux_mem32_min && linux_mem32_max)
+ {
+ int region_size = linux_mem32_max - linux_mem32_min + 1;
+ int mmap_flags = MAP_SHARED;
+ void *linux_mem32_base_ptr = NULL;
+
+ /* Although not strictly necessary, we are going to mmap() the wired
+ TLB region so it is in the process page tables. These pages will
+ never fault in, but they will allow GDB to access the wired
+ region. We need the mappings to exactly match the wired TLB
+ entry. */
+ if (linux_mem32_wired)
+ {
+ mmap_flags |= MAP_FIXED;
+ linux_mem32_base_ptr = CASTPTR(void, (1ull<<31) - region_size);
+ }
+
+ int fd = open("/dev/mem", O_RDWR);
+ if (fd < 0)
+ {
+ perror("ERROR opening /dev/mem");
+ exit(-1);
+ }
+
+ linux_mem32_base_ptr = mmap64(linux_mem32_base_ptr,
+ region_size,
+ PROT_READ | PROT_WRITE,
+ mmap_flags,
+ fd,
+ linux_mem32_min);
+ close(fd);
+
+ if (MAP_FAILED == linux_mem32_base_ptr)
+ {
+ perror("Error mapping reserve32");
+ exit(-1);
+ }
+
+ linux_mem32_offset = CAST64(linux_mem32_base_ptr) - linux_mem32_min;
+ }
+}
+
+
+/**
+ * Main entrypoint of the application. Here we setup shared
+ * memory and fork processes for each cpu. This simulates the
+ * normal simple executive environment of one process per
+ * cpu core.
+ *
+ * @param argc Number of command line arguments
+ * @param argv The command line arguments
+ * @return Return value for the process
+ */
+int main(int argc, const char *argv[])
+{
+ CVMX_SHARED static cvmx_spinlock_t mask_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+ CVMX_SHARED static int32_t pending_fork;
+ unsigned long cpumask;
+ unsigned long cpu;
+ int lastcpu = 0;
+
+ cvmx_sysinfo_linux_userspace_initialize();
+
+ if (sizeof(void*) == 4)
+ {
+ if (linux_mem32_min)
+ setup_reserve32();
+ else
+ {
+ printf("\nFailed to access 32bit shared memory region. Most likely the Kernel\n"
+ "has not been configured for 32bit shared memory access. Check the\n"
+ "kernel configuration.\n"
+ "Aborting...\n\n");
+ exit(-1);
+ }
+ }
+
+ setup_cvmx_shared();
+ cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_ptr);
+
+ /* Check to make sure the Chip version matches the configured version */
+ octeon_model_version_check(cvmx_get_proc_id());
+
+ /* Get the list of logical cpus we should run on */
+ if (sched_getaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
+ {
+ perror("sched_getaffinity failed");
+ exit(errno);
+ }
+
+ cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
+
+ cvmx_atomic_set32(&pending_fork, 1);
+ for (cpu=0; cpu<16; cpu++)
+ {
+ if (cpumask & (1<<cpu))
+ {
+ /* Turn off the bit for this CPU number. We've counted him */
+ cpumask ^= (1<<cpu);
+ /* If this is the last CPU to run on, use this process instead of forking another one */
+ if (cpumask == 0)
+ {
+ lastcpu = 1;
+ break;
+ }
+ /* Increment the number of CPUs running this app */
+ cvmx_atomic_add32(&pending_fork, 1);
+ /* Flush all IO streams before the fork. Otherwise any buffered
+ data in the C library will be duplicated. This results in
+ duplicate output from a single print */
+ fflush(NULL);
+ /* Fork a process for the new CPU */
+ int pid = fork();
+ if (pid == 0)
+ {
+ break;
+ }
+ else if (pid == -1)
+ {
+ perror("Fork failed");
+ exit(errno);
+ }
+ }
+ }
+
+ /* Set affinity to lock me to the correct CPU */
+ cpumask = (1<<cpu);
+ if (sched_setaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
+ {
+ perror("sched_setaffinity failed");
+ exit(errno);
+ }
+
+ cvmx_spinlock_lock(&mask_lock);
+ system_info->core_mask |= 1<<cvmx_get_core_num();
+ cvmx_atomic_add32(&pending_fork, -1);
+ if (cvmx_atomic_get32(&pending_fork) == 0)
+ cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask);
+ if (lastcpu)
+ system_info->init_core = cvmx_get_core_num();
+ cvmx_spinlock_unlock(&mask_lock);
+
+ /* Spinning waiting for forks to complete */
+ while (cvmx_atomic_get32(&pending_fork)) {}
+
+ cvmx_coremask_barrier_sync(system_info->core_mask);
+
+ int ret = sysmips(MIPS_CAVIUM_XKPHYS_WRITE, getpid(), 3, 0);
+ if (ret != 0) {
+ int32_t w = cvmx_atomic_fetch_and_add32(&warn_count, 1);
+ if (!w) {
+ switch(errno) {
+ case EINVAL:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
+ " Did you configure your kernel with both:\n"
+ " CONFIG_CAVIUM_OCTEON_USER_MEM_PER_PROCESS *and*\n"
+ " CONFIG_CAVIUM_OCTEON_USER_IO_PER_PROCESS?");
+ break;
+ case EPERM:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed.\n"
+ " Are you running as root?");
+ break;
+ default:
+ perror("sysmips(MIPS_CAVIUM_XKPHYS_WRITE) failed");
+ break;
+ }
+ }
+ }
+
+ int result = appmain(argc, argv);
+
+ /* Wait for all forks to complete. This needs to be the core that started
+ all of the forks. It may not be the lowest numbered core! */
+ if (cvmx_get_core_num() == system_info->init_core)
+ {
+ int num_waits;
+ CVMX_POP(num_waits, system_info->core_mask);
+ num_waits--;
+ while (num_waits--)
+ {
+ if (wait(NULL) == -1)
+ perror("CVMX: Wait for forked child failed\n");
+ }
+ }
+
+ shutdown_cvmx_shared();
+
+ return result;
+}
diff --git a/cvmx-app-init.c b/cvmx-app-init.c
new file mode 100644
index 000000000000..87692186a495
--- /dev/null
+++ b/cvmx-app-init.c
@@ -0,0 +1,616 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include <octeon-app-init.h>
+#include "cvmx-sysinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-uart.h"
+#include "cvmx-ciu.h"
+#include "cvmx-coremask.h"
+#include "cvmx-core.h"
+#include "cvmx-interrupt.h"
+#include "cvmx-ebt3000.h"
+#include "../../bootloader/u-boot/include/octeon_mem_map.h"
+
+int cvmx_debug_uart;
+
+/**
+ * @file
+ *
+ * Main entry point for all simple executive based programs.
+ */
+
+
+extern void cvmx_interrupt_initialize(void);
+
+
+
+/**
+ * Main entry point for all simple executive based programs.
+ * This is the first C function called. It completes
+ * initialization, calls main, and performs C level cleanup.
+ *
+ * @param app_desc_addr
+ * Address of the application description structure passed
+ *        from the boot loader.
+ */
+EXTERN_ASM void __cvmx_app_init(uint64_t app_desc_addr);
+
+
+/**
+ * Set up sysinfo structure from boot descriptor versions 6 and higher.
+ * In these versions, the interesting data in not in the boot info structure
+ * defined by the toolchain, but is in the cvmx_bootinfo structure defined in
+ * the simple exec.
+ *
+ * @param app_desc_ptr
+ * pointer to boot descriptor block
+ *
+ * @param sys_info_ptr
+ * pointer to sysinfo structure to fill in
+ */
+static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx_sysinfo_t *sys_info_ptr)
+{
+    /* The v6+ boot descriptor only carries the virtual address of the
+    ** cvmx_bootinfo block; all interesting data lives in that block. */
+    cvmx_bootinfo_t *cvmx_bootinfo_ptr = CASTPTR(cvmx_bootinfo_t, app_desc_ptr->cvmx_desc_vaddr);
+
+    /* copy application information for simple exec use */
+    /* Populate the sys_info structure from the boot descriptor block created by the bootloader.
+    ** The boot descriptor block is put in the top of the heap, so it will be overwritten when the
+    ** heap is fully used. Information that is to be used must be copied before that.
+    ** Applications should only use the sys_info structure, not the boot descriptor.
+    */
+    if (cvmx_bootinfo_ptr->major_version == 1)
+    {
+        sys_info_ptr->core_mask = cvmx_bootinfo_ptr->core_mask;
+        sys_info_ptr->heap_base = cvmx_bootinfo_ptr->heap_base;
+        sys_info_ptr->heap_size = cvmx_bootinfo_ptr->heap_end - cvmx_bootinfo_ptr->heap_base;
+        sys_info_ptr->stack_top = cvmx_bootinfo_ptr->stack_top;
+        sys_info_ptr->stack_size = cvmx_bootinfo_ptr->stack_size;
+        /* The core running this setup code is recorded as the init core */
+        sys_info_ptr->init_core = cvmx_get_core_num();
+        /* Bootloader passes a physical address; convert to a KSEG0 pointer */
+        sys_info_ptr->phy_mem_desc_ptr = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_bootinfo_ptr->phy_mem_desc_addr));
+        sys_info_ptr->exception_base_addr = cvmx_bootinfo_ptr->exception_base_addr;
+        sys_info_ptr->cpu_clock_hz = cvmx_bootinfo_ptr->eclock_hz;
+        /* DDR memory: the data rate is twice the DRAM clock */
+        sys_info_ptr->dram_data_rate_hz = cvmx_bootinfo_ptr->dclock_hz * 2;
+
+        sys_info_ptr->board_type = cvmx_bootinfo_ptr->board_type;
+        sys_info_ptr->board_rev_major = cvmx_bootinfo_ptr->board_rev_major;
+        sys_info_ptr->board_rev_minor = cvmx_bootinfo_ptr->board_rev_minor;
+        memcpy(sys_info_ptr->mac_addr_base, cvmx_bootinfo_ptr->mac_addr_base, 6);
+        sys_info_ptr->mac_addr_count = cvmx_bootinfo_ptr->mac_addr_count;
+        memcpy(sys_info_ptr->board_serial_number, cvmx_bootinfo_ptr->board_serial_number, CVMX_BOOTINFO_OCTEON_SERIAL_LEN);
+        /* Console defaults to uart 0 unless the bootloader flags select uart 1 */
+        sys_info_ptr->console_uart_num = 0;
+        if (cvmx_bootinfo_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
+            sys_info_ptr->console_uart_num = 1;
+
+        if (cvmx_bootinfo_ptr->dram_size > 16*1024*1024)
+            sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size; /* older bootloaders incorrectly gave this in bytes, so don't convert */
+        else
+            sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size * 1024 * 1024; /* convert from Megabytes to bytes */
+        /* Fields below were appended in later minor versions; only read them
+        ** when the bootloader says they exist. */
+        if (cvmx_bootinfo_ptr->minor_version >= 1)
+        {
+            sys_info_ptr->compact_flash_common_base_addr = cvmx_bootinfo_ptr->compact_flash_common_base_addr;
+            sys_info_ptr->compact_flash_attribute_base_addr = cvmx_bootinfo_ptr->compact_flash_attribute_base_addr;
+            sys_info_ptr->led_display_base_addr = cvmx_bootinfo_ptr->led_display_base_addr;
+        }
+        else if (sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT3000 ||
+                 sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5800)
+        {
+            /* Default these variables so that users of the structure can be the same no
+            ** matter what version of boot info block the bootloader passes */
+            sys_info_ptr->compact_flash_common_base_addr = 0x1d000000 + 0x800;
+            sys_info_ptr->compact_flash_attribute_base_addr = 0x1d010000;
+            if (sys_info_ptr->board_rev_major == 1)
+                sys_info_ptr->led_display_base_addr = 0x1d020000;
+            else
+                sys_info_ptr->led_display_base_addr = 0x1d020000 + 0xf8;
+        }
+        else
+        {
+            /* No compact flash / LED display information is available */
+            sys_info_ptr->compact_flash_common_base_addr = 0;
+            sys_info_ptr->compact_flash_attribute_base_addr = 0;
+            sys_info_ptr->led_display_base_addr = 0;
+        }
+
+        if (cvmx_bootinfo_ptr->minor_version >= 2)
+        {
+            sys_info_ptr->dfa_ref_clock_hz = cvmx_bootinfo_ptr->dfa_ref_clock_hz;
+            sys_info_ptr->bootloader_config_flags = cvmx_bootinfo_ptr->config_flags;
+        }
+        else
+        {
+            /* Older bootinfo: synthesize the config flags from the legacy
+            ** 'flags' field of the boot descriptor. */
+            sys_info_ptr->dfa_ref_clock_hz = 0;
+            sys_info_ptr->bootloader_config_flags = 0;
+            if (app_desc_ptr->flags & OCTEON_BL_FLAG_DEBUG)
+                sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_DEBUG;
+            if (app_desc_ptr->flags & OCTEON_BL_FLAG_NO_MAGIC)
+                sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC;
+        }
+
+    }
+    else
+    {
+        /* A different major version means the cvmx_bootinfo_t layout is
+        ** incompatible; there is no safe way to continue, so hang. */
+        printf("ERROR: Incompatible CVMX descriptor passed by bootloader: %d.%d\n",
+               (int)cvmx_bootinfo_ptr->major_version, (int)cvmx_bootinfo_ptr->minor_version);
+        while (1);
+    }
+}
+
+
+/**
+ * Interrupt handler for debugger Control-C interrupts.
+ *
+ * @param irq_number IRQ interrupt number
+ * @param registers CPU registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void process_debug_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
+{
+    /* Map the IRQ number back to a uart index */
+    int uart = irq_number - CVMX_IRQ_UART0;
+    cvmx_uart_lsr_t lsrval;
+
+    /* Check for a Control-C interrupt from the debugger. This loop will eat
+       all input received on the uart */
+    lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+    while (lsrval.s.dr)   /* data-ready: at least one received byte pending */
+    {
+        int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart));
+        if (c == '\003')  /* ASCII ETX == Control-C */
+        {
+            register uint64_t tmp;
+            /* Flush buffered output before stopping so it is not lost */
+            fflush(stderr);
+            fflush(stdout);
+            /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
+               set the MCD0 to be not masked by this core so we know
+               the signal is received by someone */
+            asm volatile (
+                "dmfc0 %0, $22\n"
+                "ori %0, %0, 0x1110\n"
+                "dmtc0 %0, $22\n"
+                : "=r" (tmp));
+        }
+        lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+    }
+}
+
+/**
+ * Interrupt handler for calling exit on Control-C interrupts.
+ *
+ * @param irq_number IRQ interrupt number
+ * @param registers CPU registers at the time of the interrupt
+ * @param user_arg Unused user argument
+ */
+static void process_break_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
+{
+    /* Exclude new functionality when building with older toolchains */
+#if OCTEON_APP_INIT_H_VERSION >= 3
+    /* Map the IRQ number back to a uart index */
+    int uart = irq_number - CVMX_IRQ_UART0;
+    cvmx_uart_lsr_t lsrval;
+
+    /* Check for a Control-C interrupt from the console. This loop will eat
+       all input received on the uart */
+    lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+    while (lsrval.s.dr)   /* data-ready: at least one received byte pending */
+    {
+        int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart));
+        if (c == '\003')  /* ASCII ETX == Control-C */
+        {
+            register uint64_t tmp;
+
+            /* Wait for another Control-C if right now we have no
+               access to the console. After this point we hold the
+               lock and use a different lock to synchronize between
+               the memfile dumps from different cores. As a
+               consequence regular printfs *don't* work after this
+               point! */
+            if (__octeon_uart_trylock () == 1)
+                return;
+
+            /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
+               set the MCD0 to be not masked by this core so we know
+               the signal is received by someone */
+            asm volatile (
+                "dmfc0 %0, $22\n"
+                "ori %0, %0, 0x1110\n"
+                "dmtc0 %0, $22\n"
+                : "=r" (tmp));
+        }
+        lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
+    }
+#endif
+}
+
+/**
+ * This is the debug exception handler with "break". Before calling exit to
+ * dump the profile-feedback output it releases the lock on the console.
+ * This way if there is buffered data in stdout it can still be flushed.
+ * stdio is required to flush all output during an fread.
+ */
+
+static void exit_on_break(void)
+{
+#if OCTEON_APP_INIT_H_VERSION >= 4
+    unsigned int coremask = cvmx_sysinfo_get()->core_mask;
+
+    /* Rendezvous all cores, then let the first core give the console back
+       (process_break_interrupt took the uart lock before raising MCD0). */
+    cvmx_coremask_barrier_sync(coremask);
+    if (cvmx_coremask_first_core(coremask))
+        __octeon_uart_unlock();
+#endif
+
+    /* exit() flushes stdio and dumps the profile-feedback output */
+    exit(0);
+}
+
+/* Add string signature to applications so that we can easily tell what
+** Octeon revision they were compiled for. Don't make static to avoid unused
+** variable warning. */
+#define xstr(s) str(s)
+#define str(s) #s
+
+int octeon_model_version_check(uint32_t chip_id);
+
+#define OMS xstr(OCTEON_MODEL)
+char octeon_rev_signature[] =
+#ifdef USE_RUNTIME_MODEL_CHECKS
+ "Compiled for runtime Octeon model checking";
+#else
+ "Compiled for Octeon processor id: "OMS;
+#endif
+
+void __cvmx_app_init(uint64_t app_desc_addr)
+{
+    /* App descriptor used by bootloader (physical/virtual address passed in $a0) */
+    octeon_boot_descriptor_t *app_desc_ptr = CASTPTR(octeon_boot_descriptor_t, app_desc_addr);
+
+    /* app info structure used by the simple exec */
+    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+
+    if (cvmx_coremask_first_core(app_desc_ptr->core_mask))
+    {
+        /* do once per application setup */
+        if (app_desc_ptr->desc_version < 6)
+        {
+            printf("Obsolete bootloader, can't run application\n");
+            while (1)
+                ;
+        }
+        else
+        {
+            /* Handle all newer versions here.... */
+            if (app_desc_ptr->desc_version > 7)
+            {
+                /* Fixed spelling of the warning message ("descripter") */
+                printf("Warning: newer boot descriptor version than expected\n");
+            }
+            process_boot_desc_ver_6(app_desc_ptr,sys_info_ptr);
+
+        }
+    }
+    /* Make sure sys_info is fully populated before any other core reads it */
+    cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
+    /* All cores need to enable MCD0 signals if the debugger flag is set */
+    if (sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG)
+    {
+        /* Set all cores to stop on MCD0 signals */
+        uint64_t tmp;
+        asm volatile(
+            "dmfc0 %0, $22, 0\n"
+            "or %0, %0, 0x1100\n"
+            "dmtc0 %0, $22, 0\n" : "=r" (tmp));
+    }
+
+    cvmx_interrupt_initialize();
+
+    /* The remaining one-time setup is done by the first core only */
+    if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
+    {
+        /* Check to make sure the Chip version matches the configured version */
+        uint32_t chip_id = cvmx_get_proc_id();
+        int debugflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG;
+        int breakflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_BREAK;
+        int uart;
+
+        /* Initialize the bootmem allocator with the descriptor that was provided by
+        ** the bootloader
+        ** IMPORTANT: All printfs must happen after this since PCI console uses named
+        ** blocks.
+        */
+        cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_ptr);
+
+        /* Make sure we can properly run on this chip */
+        octeon_model_version_check(chip_id);
+
+        /* Default to the second uart port. Set this even if debug was
+           not passed. The idea is that if the program crashes one would
+           be able to break in on uart1 even without debug. */
+        cvmx_debug_uart = 1;
+        /* If the debugger flag is set, setup the uart Control-C interrupt
+           handler */
+        if (debugflag)
+        {
+            /* Search through the arguments for a debug=X */
+            unsigned int i;
+            for (i=0; i<app_desc_ptr->argc; i++)
+            {
+                const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
+                if (strncmp(argv, "debug=", 6) == 0)
+                {
+                    /* Use the supplied uart as an override */
+                    cvmx_debug_uart = atoi(argv+6);
+                    break;
+                }
+            }
+            cvmx_interrupt_register(CVMX_IRQ_UART0+cvmx_debug_uart, process_debug_interrupt, NULL);
+            uart = cvmx_debug_uart;
+        }
+        else if (breakflag)
+        {
+            unsigned int i;
+            /* Fixed KSEG0 location where the debug exception stub looks up
+               the address to jump to (see bootloader memory map header). */
+            int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE));
+            /* Default to the first uart port. */
+            uart = 0;
+
+            /* Search through the arguments for a break=X */
+            for (i = 0; i < app_desc_ptr->argc; i++)
+            {
+                const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
+                if (strncmp(argv, "break=", 6) == 0)
+                {
+                    /* Use the supplied uart as an override */
+                    uart = atoi(argv+6);
+                    break;
+                }
+            }
+
+            /* On debug exception, call exit_on_break from all cores. */
+            *trampoline = (int32_t)(long)&exit_on_break;
+            cvmx_interrupt_register(CVMX_IRQ_UART0 + uart, process_break_interrupt, NULL);
+        }
+        if (debugflag || breakflag)
+        {
+            /* Enable uart interrupts for debugger Control-C processing */
+            cvmx_uart_ier_t ier;
+            ier.u64 = cvmx_read_csr(CVMX_MIO_UARTX_IER(uart));
+            ier.s.erbfi = 1;   /* "enable received data available interrupt" */
+            cvmx_write_csr(CVMX_MIO_UARTX_IER(uart), ier.u64);
+
+            cvmx_interrupt_unmask_irq(CVMX_IRQ_UART0+uart);
+        }
+    }
+
+    /* Clear BEV now that we have installed exception handlers. */
+    uint64_t tmp;
+    asm volatile (
+        " .set push \n"
+        " .set mips64 \n"
+        " .set noreorder \n"
+        " .set noat \n"
+        " mfc0 %[tmp], $12, 0 \n"
+        " li $at, 1 << 22 \n"
+        " not $at, $at \n"
+        " and %[tmp], $at \n"
+        " mtc0 %[tmp], $12, 0 \n"
+        " .set pop \n"
+        : [tmp] "=&r" (tmp) : );
+
+    /* Set all cores to stop on MCD0 signals */
+    asm volatile(
+        "dmfc0 %0, $22, 0\n"
+        "or %0, %0, 0x1100\n"
+        "dmtc0 %0, $22, 0\n" : "=r" (tmp));
+
+    CVMX_SYNC;
+    /* Synchronise all cores at this point */
+    cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
+
+}
+
+/**
+ * Per-application hardware initialization: reports BIST/parity status,
+ * configures CVMSEG local memory, and (optionally) installs 1-1 TLB
+ * mappings for DRAM. Returns 0 on completion; hangs on fatal TLB errors.
+ */
+int cvmx_user_app_init(void)
+{
+    uint64_t bist_val;
+    uint64_t mask;
+    int bist_errors = 0;   /* counted for reporting; does not abort init */
+    uint64_t tmp;
+    uint64_t base_addr;
+
+
+    /* Put message on LED display */
+    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
+        ebt3000_str_write("CVMX ");
+
+    /* Check BIST results for COP0 registers, some values only meaningful in pass 2 */
+    CVMX_MF_CACHE_ERR(bist_val);
+    mask = (1ULL<<32) | (1ULL<<33) | (1ULL<<34) | (1ULL<<35) | (1ULL<<36);
+    bist_val &= mask;
+    if (bist_val)
+    {
+        printf("BIST FAILURE: COP0_CACHE_ERR: 0x%llx\n", (unsigned long long)bist_val);
+        bist_errors++;
+    }
+    /* Clear parity error bits (low 3 bits of CACHE_ERR) */
+    CVMX_MF_CACHE_ERR(bist_val);
+    bist_val &= ~0x7ull;
+    CVMX_MT_CACHE_ERR(bist_val);
+
+
+    /* BIST status lives in the top 6 bits of CVM_MEM_CTL */
+    mask = 0xfc00000000000000ull;
+    CVMX_MF_CVM_MEM_CTL(bist_val);
+    bist_val &= mask;
+    if (bist_val)
+    {
+        printf("BIST FAILURE: COP0_CVM_MEM_CTL: 0x%llx\n", (unsigned long long)bist_val);
+        bist_errors++;
+    }
+
+    /* Clear DCACHE parity error bit */
+    bist_val = 0;
+    CVMX_MF_DCACHE_ERR(bist_val);
+
+    /* Check and clear L2 data/tag ECC error bits */
+    mask = 0x18ull;
+    bist_val = cvmx_read_csr(CVMX_L2D_ERR);
+    if (bist_val & mask)
+    {
+        printf("ERROR: ECC error detected in L2 Data, L2D_ERR: 0x%llx\n", (unsigned long long)bist_val);
+        cvmx_write_csr(CVMX_L2D_ERR, bist_val); /* Clear error bits if set */
+    }
+    bist_val = cvmx_read_csr(CVMX_L2T_ERR);
+    if (bist_val & mask)
+    {
+        printf("ERROR: ECC error detected in L2 Tags, L2T_ERR: 0x%llx\n", (unsigned long long)bist_val);
+        cvmx_write_csr(CVMX_L2T_ERR, bist_val); /* Clear error bits if set */
+    }
+
+
+    /* Set up 4 cache lines of local memory, make available from Kernel space */
+    CVMX_MF_CVM_MEM_CTL(tmp);
+    tmp &= ~0x1ffull;
+    tmp |= 0x104ull;
+    CVMX_MT_CVM_MEM_CTL(tmp);
+
+
+#if CVMX_USE_1_TO_1_TLB_MAPPINGS
+
+    /* Check to see if the bootloader is indicating that the application is outside
+    ** of the 0x10000000 0x20000000 range, in which case we can't use 1-1 mappings */
+    if (cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING)
+    {
+        printf("ERROR: 1-1 TLB mappings configured and oversize application loaded.\n");
+        printf("ERROR: Either 1-1 TLB mappings must be disabled or application size reduced.\n");
+        while (1)
+            ;
+    }
+
+
+    /* Create 1-1 Mappings for all DRAM up to 8 gigs, excluding the low 1 Megabyte. This area
+    ** is reserved for the bootloader and exception vectors. By not mapping this area, NULL pointer
+    ** dereferences will be caught with TLB exceptions. Exception handlers should be written
+    ** using XKPHYS or KSEG0 addresses. */
+#if CVMX_NULL_POINTER_PROTECT
+    /* Exclude low 1 MByte from mapping to detect NULL pointer accesses.
+    ** The only down side of this is it uses more TLB mappings.
+    ** Each call installs one TLB entry covering an even/odd page pair. */
+    cvmx_core_add_fixed_tlb_mapping_bits(0x0, 0x0, 0x100000 | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_1M);
+    cvmx_core_add_fixed_tlb_mapping(0x200000, 0x200000, 0x300000, CVMX_TLB_PAGEMASK_1M);
+    cvmx_core_add_fixed_tlb_mapping(0x400000, 0x400000, 0x500000, CVMX_TLB_PAGEMASK_1M);
+    cvmx_core_add_fixed_tlb_mapping(0x600000, 0x600000, 0x700000, CVMX_TLB_PAGEMASK_1M);
+
+    cvmx_core_add_fixed_tlb_mapping(0x800000, 0x800000, 0xC00000, CVMX_TLB_PAGEMASK_4M);
+    cvmx_core_add_fixed_tlb_mapping(0x1000000, 0x1000000, 0x1400000, CVMX_TLB_PAGEMASK_4M);
+    cvmx_core_add_fixed_tlb_mapping(0x1800000, 0x1800000, 0x1c00000, CVMX_TLB_PAGEMASK_4M);
+
+    cvmx_core_add_fixed_tlb_mapping(0x2000000, 0x2000000, 0x3000000, CVMX_TLB_PAGEMASK_16M);
+    cvmx_core_add_fixed_tlb_mapping(0x4000000, 0x4000000, 0x5000000, CVMX_TLB_PAGEMASK_16M);
+    cvmx_core_add_fixed_tlb_mapping(0x6000000, 0x6000000, 0x7000000, CVMX_TLB_PAGEMASK_16M);
+#else
+    /* Map entire low 128 Megs, including 0x0 */
+    cvmx_core_add_fixed_tlb_mapping(0x0, 0x0, 0x4000000ULL, CVMX_TLB_PAGEMASK_64M);
+#endif
+    cvmx_core_add_fixed_tlb_mapping(0x8000000ULL, 0x8000000ULL, 0xc000000ULL, CVMX_TLB_PAGEMASK_64M);
+
+    /* Create 1-1 mapping for next 256 megs
+    ** bottom page is not valid */
+    cvmx_core_add_fixed_tlb_mapping_bits(0x400000000ULL, 0, 0x410000000ULL | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_256M);
+
+    /* Map from 0.5 up to the installed memory size in 512 MByte chunks. If this loop runs out of memory,
+    ** the NULL pointer detection can be disabled to free up more TLB entries. */
+    if (cvmx_sysinfo_get()->system_dram_size > 0x20000000ULL)
+    {
+        for (base_addr = 0x20000000ULL; base_addr <= (cvmx_sysinfo_get()->system_dram_size - 0x20000000ULL); base_addr += 0x20000000ULL)
+        {
+            if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
+            {
+                printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
+                while (1); /* Hang here, as expected memory mappings aren't set up if this fails */
+            }
+        }
+    }
+
+
+#endif
+
+
+    /* Re-initialize bootmem for this process's view of the memory descriptor */
+    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
+    cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_ptr);
+
+    return(0);
+}
+
+/**
+ * Called at application exit on real hardware: reports parity/ECC errors
+ * that accumulated while the application ran, then hangs. On the simulator
+ * this function returns immediately so the simulation can finish.
+ */
+void __cvmx_app_exit(void)
+{
+    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
+    {
+        uint64_t val;
+        uint64_t mask, expected;
+        int bist_errors = 0;
+
+        /* DCACHE parity error is reported in bit 0 of COP0 DCACHE_ERR */
+        mask = 0x1ull;
+        expected = 0x0ull;
+        CVMX_MF_DCACHE_ERR(val);
+        val = (val & mask) ^ expected;
+        if (val)
+        {
+            printf("DCACHE Parity error: 0x%llx\n", (unsigned long long)val);
+            bist_errors++;
+        }
+
+        /* L2 data ECC error bits */
+        mask = 0x18ull;
+        expected = 0x0ull;
+        val = cvmx_read_csr(CVMX_L2D_ERR);
+        val = (val & mask) ^ expected;
+        if (val)
+        {
+            printf("L2 Parity error: 0x%llx\n", (unsigned long long)val);
+            bist_errors++;
+        }
+
+
+        /* NOTE(review): hangs unconditionally, even when no errors were
+           found -- presumably intentional so the board halts at exit and
+           the messages remain visible; confirm before changing. */
+        while (1)
+            ;
+
+    }
+}
+
+
+
diff --git a/cvmx-app-init.h b/cvmx-app-init.h
new file mode 100644
index 000000000000..cfe65fb4dbbd
--- /dev/null
+++ b/cvmx-app-init.h
@@ -0,0 +1,295 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+/**
+ * @file
+ * Header file for simple executive application initialization. This defines
+ * part of the ABI between the bootloader and the application.
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#ifndef __CVMX_APP_INIT_H__
+#define __CVMX_APP_INIT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Current major and minor versions of the CVMX bootinfo block that is passed
+** from the bootloader to the application. This is versioned so that applications
+** can properly handle multiple bootloader versions. */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 2
+
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/* This structure is populated by the bootloader. For binary
+** compatibility the only changes that should be made are
+** adding members to the end of the structure, and the minor
+** version should be incremented at that time.
+** If an incompatible change is made, the major version
+** must be incremented, and the minor version should be reset
+** to 0.
+*/
+typedef struct
+{
+    uint32_t major_version;            /**< CVMX_BOOTINFO_MAJ_VER of the bootloader that built this block */
+    uint32_t minor_version;            /**< CVMX_BOOTINFO_MIN_VER; gates the optional trailing fields below */
+
+    uint64_t stack_top;                /**< top of the stack the bootloader set up for the application */
+    uint64_t heap_base;                /**< start of the application heap */
+    uint64_t heap_end;                 /**< end of the application heap (heap size = heap_end - heap_base) */
+    uint64_t desc_vaddr;               /**< presumably the virtual address of the boot descriptor -- not read by cvmx-app-init.c; confirm */
+
+    uint32_t exception_base_addr;      /**< base address used for exception vectors */
+    uint32_t stack_size;               /**< stack size in bytes */
+    uint32_t flags;                    /**< legacy boot flags (e.g. OCTEON_BL_FLAG_CONSOLE_UART1/DEBUG/NO_MAGIC) */
+    uint32_t core_mask;                /**< mask of cores running this application */
+    uint32_t dram_size;                /**< DRAM size in megabytes */
+    uint32_t phy_mem_desc_addr;        /**< physical address of free memory descriptor block*/
+    uint32_t debugger_flags_base_addr; /**< used to pass flags from app to debugger */
+    uint32_t eclock_hz;                /**< CPU clock speed, in hz */
+    uint32_t dclock_hz;                /**< DRAM clock speed, in hz (DDR data rate is 2x this) */
+    uint32_t reserved0;                /**< reserved/padding */
+    uint16_t board_type;               /**< one of enum cvmx_board_types_enum */
+    uint8_t board_rev_major;           /**< board major revision */
+    uint8_t board_rev_minor;           /**< board minor revision */
+    uint16_t reserved1;
+    uint8_t reserved2;
+    uint8_t reserved3;
+    char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN]; /**< board serial number string */
+    uint8_t mac_addr_base[6];          /**< base MAC address assigned to this board */
+    uint8_t mac_addr_count;            /**< number of MAC addresses assigned, starting at mac_addr_base */
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+    /* Several boards support compact flash on the Octeon boot bus. The CF
+    ** memory spaces may be mapped to different addresses on different boards.
+    ** These are the physical addresses, so care must be taken to use the correct
+    ** XKPHYS/KSEG0 addressing depending on the application's ABI.
+    ** These values will be 0 if CF is not present */
+    uint64_t compact_flash_common_base_addr;
+    uint64_t compact_flash_attribute_base_addr;
+    /* Base address of the LED display (as on EBT3000 board)
+    ** This will be 0 if LED display not present. */
+    uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+    uint32_t dfa_ref_clock_hz; /**< DFA reference clock in hz (if applicable)*/
+    uint32_t config_flags; /**< flags indicating various configuration options. These flags supersede
+                           ** the 'flags' variable and should be used instead if available */
+#endif
+
+
+} cvmx_bootinfo_t;
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
+/* This flag is set if the TLB mappings are not contained in the
+** 0x10000000 - 0x20000000 boot bus region. */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
+
+#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
+
+
+/* Type defines for board and chip types */
+/* Board identifiers carried in cvmx_bootinfo_t.board_type; values are
+** allocated and managed by Cavium. */
+enum cvmx_board_types_enum {
+    CVMX_BOARD_TYPE_NULL = 0,
+    CVMX_BOARD_TYPE_SIM = 1,
+    CVMX_BOARD_TYPE_EBT3000 = 2,
+    CVMX_BOARD_TYPE_KODAMA = 3,
+    CVMX_BOARD_TYPE_NIAGARA = 4,  /* Obsolete, no longer supported */
+    CVMX_BOARD_TYPE_NAC38 = 5,  /* formerly NAO38 */
+    CVMX_BOARD_TYPE_THUNDER = 6,
+    CVMX_BOARD_TYPE_TRANTOR = 7,  /* Obsolete, no longer supported */
+    CVMX_BOARD_TYPE_EBH3000 = 8,
+    CVMX_BOARD_TYPE_EBH3100 = 9,
+    CVMX_BOARD_TYPE_HIKARI = 10,
+    CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+    CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+    CVMX_BOARD_TYPE_KBP = 13,
+    CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,  /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+    CVMX_BOARD_TYPE_EBT5800 = 15,
+    CVMX_BOARD_TYPE_NICPRO2 = 16,
+    CVMX_BOARD_TYPE_EBH5600 = 17,
+    CVMX_BOARD_TYPE_EBH5601 = 18,
+    CVMX_BOARD_TYPE_EBH5200 = 19,
+    CVMX_BOARD_TYPE_BBGW_REF = 20,
+    CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+    CVMX_BOARD_TYPE_EBT5600 = 22,
+    CVMX_BOARD_TYPE_EBH5201 = 23,
+    CVMX_BOARD_TYPE_EBT5200 = 24,
+    CVMX_BOARD_TYPE_CB5600 = 25,
+    CVMX_BOARD_TYPE_CB5601 = 26,
+    CVMX_BOARD_TYPE_CB5200 = 27,
+    CVMX_BOARD_TYPE_GENERIC = 28,  /* Special 'generic' board type, supports many boards */
+    CVMX_BOARD_TYPE_MAX,  /* first unassigned SDK value, not a real board */
+
+    /* The range from CVMX_BOARD_TYPE_MAX to CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved
+    ** for future SDK use. */
+
+    /* Set aside a range for customer boards. These numbers are managed
+    ** by Cavium.
+    */
+    CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+    CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+    CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+    CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+    CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+    CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
+    CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
+    CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
+    CVMX_BOARD_TYPE_CUST_GST104 = 10008,
+    CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
+    CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
+    CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
+    CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
+    CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
+    CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
+    CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+    /* Set aside a range for customer private use. The SDK won't
+    ** use any numbers in this range. */
+    CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+    CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+    /* The remaining range is reserved for future use. */
+};
+enum cvmx_chip_types_enum {
+    CVMX_CHIP_TYPE_NULL = 0,
+    CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,  /* deprecated, retained for ABI stability */
+    CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+    CVMX_CHIP_TYPE_MAX,  /* first unassigned value, not a real chip type */
+};
+
+/* Compatibility alias for NAC38 name change, planned to be removed from SDK 1.7 */
+#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
+static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type)
+{
+    /* Each case returns the stringified enumerator name with its
+    ** "CVMX_BOARD_TYPE_" prefix (16 chars) skipped via ENUM_BRD_TYPE_CASE,
+    ** e.g. CVMX_BOARD_TYPE_SIM -> "SIM". The returned pointer is into a
+    ** string literal, so it must not be freed or modified. */
+    switch (type)
+    {
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
+
+        /* Customer boards listed here */
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
+
+        /* Customer private range */
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
+        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+    }
+    /* Unknown values (reserved ranges, newer SDKs) fall through to here */
+    return "Unsupported Board";
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */
+static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type)
+{
+    /* Returns the stringified enumerator name with the first 15 characters
+    ** ("CVMX_CHIP_TYPE_") skipped via ENUM_CHIP_TYPE_CASE.
+    ** NOTE(review): CVMX_CHIP_SIM_TYPE_DEPRECATED does not share that
+    ** prefix, so its case yields the truncated string "YPE_DEPRECATED" --
+    ** a long-standing vendor quirk, left as-is. */
+    switch (type)
+    {
+        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
+        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
+        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
+        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
+    }
+    return "Unsupported Chip";
+}
+
+
+extern int cvmx_debug_uart;
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_APP_INIT_H__ */
diff --git a/cvmx-asm.h b/cvmx-asm.h
new file mode 100644
index 000000000000..2406677efca5
--- /dev/null
+++ b/cvmx-asm.h
@@ -0,0 +1,513 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This is file defines ASM primitives for the executive.
+
+ * <hr>$Revision: 42280 $<hr>
+ *
+ *
+ */
+#ifndef __CVMX_ASM_H__
+#define __CVMX_ASM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+/* other useful stuff */
+#define CVMX_BREAK asm volatile ("break")
+#define CVMX_SYNC asm volatile ("sync" : : :"memory")
+/* String version of SYNCW macro for using in inline asm constructs */
+#define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#ifdef __OCTEON__
+ #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
+ #define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
+ #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
+ /* We actually use two syncw instructions in a row when we need a write
+ memory barrier. This is because the CN3XXX series of Octeons have
+ errata Core-401. This can cause a single syncw to not enforce
+ ordering under very rare conditions. Even if it is rare, better safe
+ than sorry */
+ #define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
+#if defined(VXWORKS) || defined(__linux__)
+ /* Define new sync instructions to be normal SYNC instructions for
+ operating systems that use threads */
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#else
+ #if defined(CVMX_BUILD_FOR_TOOLCHAIN)
+ /* While building simple exec toolchain, always use syncw to
+ support all Octeon models. */
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+ #else
+ /* Again, just like syncw, we may need two syncws instructions in a row due
+ errata Core-401 */
+ #define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
+ #define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
+ #define CVMX_SYNCWS_STR "syncws\nsyncws\n"
+ #endif
+#endif
+#else
+ /* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
+ #define CVMX_SYNCIO asm volatile ("nop") /* Deprecated, will be removed in future release */
+ #define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
+ #define CVMX_SYNCIOALL asm volatile ("nop") /* Deprecated, will be removed in future release */
+ #define CVMX_SYNCW asm volatile ("sync" : : :"memory")
+ #define CVMX_SYNCWS CVMX_SYNCW
+ #define CVMX_SYNCS CVMX_SYNC
+ #define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#endif
+#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
+#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
+#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
+// a normal prefetch
+#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
+// normal prefetches that use the pref instruction
+#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
+#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
+#define CVMX_PREFETCH_PREF1(address, offset) CVMX_PREFETCH_PREFX(1, address, offset)
+#define CVMX_PREFETCH_PREF6(address, offset) CVMX_PREFETCH_PREFX(6, address, offset)
+#define CVMX_PREFETCH_PREF7(address, offset) CVMX_PREFETCH_PREFX(7, address, offset)
+// prefetch into L1, do not put the block in the L2
+#define CVMX_PREFETCH_NOTL2(address, offset) CVMX_PREFETCH_PREFX(4, address, offset)
+#define CVMX_PREFETCH_NOTL22(address, offset) CVMX_PREFETCH_PREFX(5, address, offset)
+// prefetch into L2, do not put the block in the L1
+#define CVMX_PREFETCH_L2(address, offset) CVMX_PREFETCH_PREFX(28, address, offset)
+// CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable (actually old value or zero) until
+// that byte is stored to (by this or another processor. Note that the value of each byte is not only
+// unpredictable, but may also change again - up until the point when one of the cores stores to the
+// byte.
+#define CVMX_PREPARE_FOR_STORE(address, offset) CVMX_PREFETCH_PREFX(30, address, offset)
+// This is a command headed to the L2 controller to tell it to clear its dirty bit for a
+// block. Basically, SW is telling HW that the current version of the block will not be
+// used.
+#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)
+
+#define CVMX_ICACHE_INVALIDATE { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); } // flush stores, invalidate entire icache
+#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
+#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
+
+/* new instruction to make RC4 run faster */
+#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
+
+// misc v2 stuff
+#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
+#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
+#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
+#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
+#define CVMX_SEB(result, input1) asm ("seb %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_SEH(result, input1) asm ("seh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_DSBH(result, input1) asm ("dsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_DSHD(result, input1) asm ("dshd %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+#define CVMX_WSBH(result, input1) asm ("wsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
+
+// Endian swap
+#define CVMX_ES64(result, input) \
+ do {\
+ CVMX_DSBH(result, input); \
+ CVMX_DSHD(result, result); \
+ } while (0)
+#define CVMX_ES32(result, input) \
+ do {\
+ CVMX_WSBH(result, input); \
+ CVMX_ROTR(result, result, 16); \
+ } while (0)
+
+
+/* extract and insert - NOTE that pos and len variables must be constants! */
+/* the P variants take len rather than lenm1 */
+/* the M1 variants take lenm1 rather than len */
+#define CVMX_EXTS(result,input,pos,lenm1) asm ("exts %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_EXTSP(result,input,pos,len) CVMX_EXTS(result,input,pos,(len)-1)
+
+#define CVMX_DEXT(result,input,pos,len) asm ("dext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_DEXTM1(result,input,pos,lenm1) CVMX_DEXT(result,input,pos,(lenm1)+1)
+
+#define CVMX_EXT(result,input,pos,len) asm ("ext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_EXTM1(result,input,pos,lenm1) CVMX_EXT(result,input,pos,(lenm1)+1)
+
+// removed
+// #define CVMX_EXTU(result,input,pos,lenm1) asm ("extu %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+// #define CVMX_EXTUP(result,input,pos,len) CVMX_EXTU(result,input,pos,(len)-1)
+
+#define CVMX_CINS(result,input,pos,lenm1) asm ("cins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
+#define CVMX_CINSP(result,input,pos,len) CVMX_CINS(result,input,pos,(len)-1)
+
+#define CVMX_DINS(result,input,pos,len) asm ("dins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+#define CVMX_DINSM1(result,input,pos,lenm1) CVMX_DINS(result,input,pos,(lenm1)+1)
+#define CVMX_DINSC(result,pos,len) asm ("dins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
+#define CVMX_DINSCM1(result,pos,lenm1) CVMX_DINSC(result,pos,(lenm1)+1)
+
+#define CVMX_INS(result,input,pos,len) asm ("ins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+#define CVMX_INSM1(result,input,pos,lenm1) CVMX_INS(result,input,pos,(lenm1)+1)
+#define CVMX_INSC(result,pos,len) asm ("ins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
+#define CVMX_INSCM1(result,pos,lenm1) CVMX_INSC(result,pos,(lenm1)+1)
+
+// removed
+// #define CVMX_INS0(result,input,pos,lenm1) asm("ins0 %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
+// #define CVMX_INS0P(result,input,pos,len) CVMX_INS0(result,input,pos,(len)-1)
+// #define CVMX_INS0C(result,pos,lenm1) asm ("ins0 %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : "[rt]" (result))
+// #define CVMX_INS0CP(result,pos,len) CVMX_INS0C(result,pos,(len)-1)
+
+#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DCLZ(result, input) asm ("dclz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_CLO(result, input) asm ("clo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DCLO(result, input) asm ("dclo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_POP(result, input) asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DPOP(result, input) asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+
+#ifdef CVMX_ABI_O32
+
+ /* rdhwr $31 is the 64 bit cmvcount register, it needs to be split
+ into one or two (depending on the width of the result) properly
+ sign extended registers. All other registers are 32 bits wide
+ and already properly sign extended. */
+# define CVMX_RDHWRX(result, regstr, ASM_STMT) ({ \
+ if (regstr == 31) { \
+ if (sizeof(result) == 8) { \
+ ASM_STMT (".set\tpush\n" \
+ "\t.set\tmips64r2\n" \
+ "\trdhwr\t%L0,$31\n" \
+ "\tdsra\t%M0,%L0,32\n" \
+ "\tsll\t%L0,%L0,0\n" \
+ "\t.set\tpop": "=d"(result)); \
+ } else { \
+ unsigned long _v; \
+ ASM_STMT ("rdhwr\t%0,$31\n" \
+ "\tsll\t%0,%0,0" : "=d"(_v)); \
+ result = (typeof(result))_v; \
+ } \
+ } else { \
+ unsigned long _v; \
+ ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
+ result = (typeof(result))_v; \
+ }})
+
+
+
+# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
+# define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
+#else
+# define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+# define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#endif
+
+// some new cop0-like stuff
+#define CVMX_DI(result) asm volatile ("di %[rt]" : [rt] "=d" (result))
+#define CVMX_DI_NULL asm volatile ("di")
+#define CVMX_EI(result) asm volatile ("ei %[rt]" : [rt] "=d" (result))
+#define CVMX_EI_NULL asm volatile ("ei")
+#define CVMX_EHB asm volatile ("ehb")
+
+/* mul stuff */
+#define CVMX_MTM0(m) asm volatile ("mtm0 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTM1(m) asm volatile ("mtm1 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTM2(m) asm volatile ("mtm2 %[rs]" : : [rs] "d" (m))
+#define CVMX_MTP0(p) asm volatile ("mtp0 %[rs]" : : [rs] "d" (p))
+#define CVMX_MTP1(p) asm volatile ("mtp1 %[rs]" : : [rs] "d" (p))
+#define CVMX_MTP2(p) asm volatile ("mtp2 %[rs]" : : [rs] "d" (p))
+#define CVMX_VMULU(dest,mpcand,accum) asm volatile ("vmulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+#define CVMX_VMM0(dest,mpcand,accum) asm volatile ("vmm0 %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+#define CVMX_V3MULU(dest,mpcand,accum) asm volatile ("v3mulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
+
+/* branch stuff */
+// these are hard to make work because the compiler does not realize that the
+// instruction is a branch so may optimize away the label
+// the labels to these next two macros must not include a ":" at the end
+#define CVMX_BBIT1(var, pos, label) asm volatile ("bbit1 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
+#define CVMX_BBIT0(var, pos, label) asm volatile ("bbit0 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
+// the label to this macro must include a ":" at the end
+#define CVMX_ASM_LABEL(label) label \
+ asm volatile (CVMX_TMP_STR(label) : : )
+
+//
+// Low-latency memory stuff
+//
+// set can be 0-1
+#define CVMX_MT_LLM_READ_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0400+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_WRITE_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0401+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_READ64_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0404+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0405+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MT_LLM_DATA(set,val) asm volatile ("dmtc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
+#define CVMX_MF_LLM_DATA(set,val) asm volatile ("dmfc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : [rt] "=d" (val) : )
+
+
+// load linked, store conditional
+#define CVMX_LL(dest, address, offset) asm volatile ("ll %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
+#define CVMX_LLD(dest, address, offset) asm volatile ("lld %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
+#define CVMX_SC(srcdest, address, offset) asm volatile ("sc %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_SCD(srcdest, address, offset) asm volatile ("scd %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+
+// load/store word left/right
+#define CVMX_LWR(srcdest, address, offset) asm volatile ("lwr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LWL(srcdest, address, offset) asm volatile ("lwl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LDR(srcdest, address, offset) asm volatile ("ldr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+#define CVMX_LDL(srcdest, address, offset) asm volatile ("ldl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
+
+#define CVMX_SWR(src, address, offset) asm volatile ("swr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SWL(src, address, offset) asm volatile ("swl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SDR(src, address, offset) asm volatile ("sdr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+#define CVMX_SDL(src, address, offset) asm volatile ("sdl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
+
+
+
+//
+// Useful crypto ASM's
+//
+
+// CRC
+
+#define CVMX_MT_CRC_POLYNOMIAL(val) asm volatile ("dmtc2 %[rt],0x4200" : : [rt] "d" (val))
+#define CVMX_MT_CRC_IV(val) asm volatile ("dmtc2 %[rt],0x0201" : : [rt] "d" (val))
+#define CVMX_MT_CRC_LEN(val) asm volatile ("dmtc2 %[rt],0x1202" : : [rt] "d" (val))
+#define CVMX_MT_CRC_BYTE(val) asm volatile ("dmtc2 %[rt],0x0204" : : [rt] "d" (val))
+#define CVMX_MT_CRC_HALF(val) asm volatile ("dmtc2 %[rt],0x0205" : : [rt] "d" (val))
+#define CVMX_MT_CRC_WORD(val) asm volatile ("dmtc2 %[rt],0x0206" : : [rt] "d" (val))
+#define CVMX_MT_CRC_DWORD(val) asm volatile ("dmtc2 %[rt],0x1207" : : [rt] "d" (val))
+#define CVMX_MT_CRC_VAR(val) asm volatile ("dmtc2 %[rt],0x1208" : : [rt] "d" (val))
+#define CVMX_MT_CRC_POLYNOMIAL_REFLECT(val) asm volatile ("dmtc2 %[rt],0x4210" : : [rt] "d" (val))
+#define CVMX_MT_CRC_IV_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0211" : : [rt] "d" (val))
+#define CVMX_MT_CRC_BYTE_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0214" : : [rt] "d" (val))
+#define CVMX_MT_CRC_HALF_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0215" : : [rt] "d" (val))
+#define CVMX_MT_CRC_WORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0216" : : [rt] "d" (val))
+#define CVMX_MT_CRC_DWORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1217" : : [rt] "d" (val))
+#define CVMX_MT_CRC_VAR_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1218" : : [rt] "d" (val))
+
+#define CVMX_MF_CRC_POLYNOMIAL(val) asm volatile ("dmfc2 %[rt],0x0200" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_IV(val) asm volatile ("dmfc2 %[rt],0x0201" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_IV_REFLECT(val) asm volatile ("dmfc2 %[rt],0x0203" : [rt] "=d" (val) : )
+#define CVMX_MF_CRC_LEN(val) asm volatile ("dmfc2 %[rt],0x0202" : [rt] "=d" (val) : )
+
+// MD5 and SHA-1
+
+// pos can be 0-6
+#define CVMX_MT_HSH_DAT(val,pos) asm volatile ("dmtc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_HSH_DATZ(pos) asm volatile ("dmtc2 $0,0x0040+" CVMX_TMP_STR(pos) : : )
+// pos can be 0-14
+#define CVMX_MT_HSH_DATW(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_HSH_DATWZ(pos) asm volatile ("dmtc2 $0,0x0240+" CVMX_TMP_STR(pos) : : )
+#define CVMX_MT_HSH_STARTMD5(val) asm volatile ("dmtc2 %[rt],0x4047" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA(val) asm volatile ("dmtc2 %[rt],0x4057" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA256(val) asm volatile ("dmtc2 %[rt],0x404f" : : [rt] "d" (val))
+#define CVMX_MT_HSH_STARTSHA512(val) asm volatile ("dmtc2 %[rt],0x424f" : : [rt] "d" (val))
+// pos can be 0-3
+#define CVMX_MT_HSH_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-7
+#define CVMX_MT_HSH_IVW(val,pos) asm volatile ("dmtc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+
+// pos can be 0-6
+#define CVMX_MF_HSH_DAT(val,pos) asm volatile ("dmfc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-14
+#define CVMX_MF_HSH_DATW(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-3
+#define CVMX_MF_HSH_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-7
+#define CVMX_MF_HSH_IVW(val,pos) asm volatile ("dmfc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+
+// 3DES
+
+// pos can be 0-2
+#define CVMX_MT_3DES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_3DES_IV(val) asm volatile ("dmtc2 %[rt],0x0084" : : [rt] "d" (val))
+#define CVMX_MT_3DES_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4088" : : [rt] "d" (val))
+#define CVMX_MT_3DES_ENC(val) asm volatile ("dmtc2 %[rt],0x408a" : : [rt] "d" (val))
+#define CVMX_MT_3DES_DEC_CBC(val) asm volatile ("dmtc2 %[rt],0x408c" : : [rt] "d" (val))
+#define CVMX_MT_3DES_DEC(val) asm volatile ("dmtc2 %[rt],0x408e" : : [rt] "d" (val))
+#define CVMX_MT_3DES_RESULT(val) asm volatile ("dmtc2 %[rt],0x0098" : : [rt] "d" (val))
+
+// pos can be 0-2
+#define CVMX_MF_3DES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_3DES_IV(val) asm volatile ("dmfc2 %[rt],0x0084" : [rt] "=d" (val) : )
+#define CVMX_MF_3DES_RESULT(val) asm volatile ("dmfc2 %[rt],0x0088" : [rt] "=d" (val) : )
+
+// KASUMI
+
+// pos can be 0-1
+#define CVMX_MT_KAS_KEY(val,pos) CVMX_MT_3DES_KEY(val,pos)
+#define CVMX_MT_KAS_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4089" : : [rt] "d" (val))
+#define CVMX_MT_KAS_ENC(val) asm volatile ("dmtc2 %[rt],0x408b" : : [rt] "d" (val))
+#define CVMX_MT_KAS_RESULT(val) CVMX_MT_3DES_RESULT(val)
+
+// pos can be 0-1
+#define CVMX_MF_KAS_KEY(val,pos) CVMX_MF_3DES_KEY(val,pos)
+#define CVMX_MF_KAS_RESULT(val) CVMX_MF_3DES_RESULT(val)
+
+// AES
+
+#define CVMX_MT_AES_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3109" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
+#define CVMX_MT_AES_ENC1(val) asm volatile ("dmtc2 %[rt],0x310b" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x310d" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
+#define CVMX_MT_AES_DEC1(val) asm volatile ("dmtc2 %[rt],0x310f" : : [rt] "d" (val))
+// pos can be 0-3
+#define CVMX_MT_AES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+// pos can be 0-1
+#define CVMX_MT_AES_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+#define CVMX_MT_AES_KEYLENGTH(val) asm volatile ("dmtc2 %[rt],0x0110" : : [rt] "d" (val)) // write the keylen
+// pos can be 0-1
+#define CVMX_MT_AES_RESULT(val,pos) asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
+
+// pos can be 0-1
+#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-1
+#define CVMX_MF_AES_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+// pos can be 0-3
+#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
+#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
+#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
+/* GFM COP2 macros */
+/* index can be 0 or 1 */
+#define CVMX_MF_GFM_MUL(val, index) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
+#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
+#define CVMX_MF_GFM_RESINP(val, index) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(index) : [rt] "=d" (val) : )
+
+#define CVMX_MT_GFM_MUL(val, index) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(index) : : [rt] "d" (val))
+#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
+#define CVMX_MT_GFM_RESINP(val, index) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(index) : : [rt] "d" (val))
+#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
+#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
+
+
+/* check_ordering stuff */
+#if 0
+#define CVMX_MF_CHORD(dest) asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
+#else
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#endif
+
+#if 0
+#define CVMX_MF_CYCLE(dest) asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
+#else
+#define CVMX_MF_CYCLE(dest) CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
+#endif
+
+#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
+
+#define CVMX_MF_CACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,0" : [rt] "=d" (val):)
+#define CVMX_MF_DCACHE_ERR(val) asm volatile ("dmfc0 %[rt],$27,1" : [rt] "=d" (val):)
+#define CVMX_MF_CVM_MEM_CTL(val) asm volatile ("dmfc0 %[rt],$11,7" : [rt] "=d" (val):)
+#define CVMX_MF_CVM_CTL(val) asm volatile ("dmfc0 %[rt],$9,7" : [rt] "=d" (val):)
+#define CVMX_MT_CACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,0" : : [rt] "d" (val))
+#define CVMX_MT_DCACHE_ERR(val) asm volatile ("dmtc0 %[rt],$27,1" : : [rt] "d" (val))
+#define CVMX_MT_CVM_MEM_CTL(val) asm volatile ("dmtc0 %[rt],$11,7" : : [rt] "d" (val))
+#define CVMX_MT_CVM_CTL(val) asm volatile ("dmtc0 %[rt],$9,7" : : [rt] "d" (val))
+
+/* Macros for TLB */
+#define CVMX_TLBWI asm volatile ("tlbwi" : : )
+#define CVMX_TLBWR asm volatile ("tlbwr" : : )
+#define CVMX_TLBR asm volatile ("tlbr" : : )
+#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
+#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
+#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
+#define CVMX_MT_PAGEMASK(val) asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
+#define CVMX_MT_PAGEGRAIN(val) asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
+#define CVMX_MT_TLB_INDEX(val) asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_CONTEXT(val) asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_WIRED(val) asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
+#define CVMX_MT_TLB_RANDOM(val) asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
+#define CVMX_MF_ENTRY_LO_0(val) asm volatile ("dmfc0 %[rt],$2,0" : [rt] "=d" (val):)
+#define CVMX_MF_ENTRY_LO_1(val) asm volatile ("dmfc0 %[rt],$3,0" : [rt] "=d" (val):)
+#define CVMX_MF_ENTRY_HIGH(val) asm volatile ("dmfc0 %[rt],$10,0" : [rt] "=d" (val):)
+#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
+#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
+#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
+#define TLB_DIRTY (0x1ULL<<2)
+#define TLB_VALID (0x1ULL<<1)
+#define TLB_GLOBAL (0x1ULL<<0)
+
+
+
+/* assembler macros to guarantee byte loads/stores are used */
+/* for an unaligned 16-bit access (these use AT register) */
+/* we need the hidden argument (__a) so that GCC gets the dependencies right */
+#define CVMX_LOADUNA_INT16(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset]), "m"(__a[offset + 1])); }
+#define CVMX_LOADUNA_UINT16(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulhu %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1])); }
+#define CVMX_STOREUNA_INT16(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ush %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : "=m"(__a[offset + 0]), "=m"(__a[offset + 1]): [rsrc] "d" (data), [rbase] "d" (__a)); }
+
+#define CVMX_LOADUNA_INT32(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("ulw %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
+ [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3])); }
+#define CVMX_STOREUNA_INT32(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("usw %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
+ "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]) : \
+ [rsrc] "d" (data), [rbase] "d" (__a)); }
+
+#define CVMX_LOADUNA_INT64(result, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("uld %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
+ [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3]), \
+ "m"(__a[offset + 4]), "m"(__a[offset + 5]), "m"(__a[offset + 6]), "m"(__a[offset + 7])); }
+#define CVMX_STOREUNA_INT64(data, address, offset) \
+ { char *__a = (char *)(address); \
+ asm ("usd %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
+ "=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
+ "=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
+ [rsrc] "d" (data), [rbase] "d" (__a)); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ASM_H__ */
diff --git a/cvmx-asx.h b/cvmx-asx.h
new file mode 100644
index 000000000000..4a49a04b4c89
--- /dev/null
+++ b/cvmx-asx.h
@@ -0,0 +1,66 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the ASX hardware.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifndef __CVMX_ASX_H__
+#define __CVMX_ASX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/cvmx-atomic.h b/cvmx-atomic.h
new file mode 100644
index 000000000000..6446130be32d
--- /dev/null
+++ b/cvmx-atomic.h
@@ -0,0 +1,666 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * This file provides atomic operations
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ *
+ */
+
+
+#ifndef __CVMX_ATOMIC_H__
+#define __CVMX_ATOMIC_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
    {
        uint32_t tmp;

        /* CN3XXX: standard MIPS ll/sc (load-linked/store-conditional)
        ** retry loop.  'sc' writes 1 to tmp on success and 0 on failure,
        ** so 'beqz tmp' loops back until the store lands atomically.
        ** The trailing 'nop' fills the branch delay slot. */
        __asm__ __volatile__(
        ".set noreorder         \n"
        "1: ll   %[tmp], %[val] \n"
        "   addu %[tmp], %[inc] \n"
        "   sc   %[tmp], %[val] \n"
        "   beqz %[tmp], 1b     \n"
        "   nop                 \n"
        ".set reorder           \n"
        : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
        : [inc] "r" (incr)
        : "memory");
    }
    else
    {
        /* Later Octeon models: single 'saa' instruction performs the
        ** atomic add directly in memory — no retry loop needed.
        ** (saa is an Octeon-specific extension; see the Octeon ISA
        ** manual.) */
        __asm__ __volatile__(
        "   saa %[inc], (%[base]) \n"
        : "+m" (*ptr)
        : [inc] "r" (incr), [base] "r" (ptr)
        : "memory");
    }
}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
static inline void cvmx_atomic_add32(int32_t *ptr, int32_t incr)
{
    /* Barriers on both sides make this usable for reference counting:
    ** prior memory operations complete before the add, and the add is
    ** visible before any later operations. */
    CVMX_SYNCWS;
    cvmx_atomic_add32_nosync(ptr, incr);
    CVMX_SYNCWS;
}
+
+/**
+ * Atomically sets a 32 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
static inline void cvmx_atomic_set32(int32_t *ptr, int32_t value)
{
    /* A 32-bit aligned store is naturally atomic; the barriers only
    ** enforce ordering relative to surrounding memory operations. */
    CVMX_SYNCWS;
    *ptr = value;
    CVMX_SYNCWS;
}
+
+/**
+ * Returns the current value of a 32 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
static inline int32_t cvmx_atomic_get32(int32_t *ptr)
{
    /* Read through a volatile-qualified pointer so the compiler emits a
    ** real load rather than reusing a cached value. */
    volatile int32_t *vptr = ptr;
    return *vptr;
}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
static inline void cvmx_atomic_add64_nosync(int64_t *ptr, int64_t incr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
    {
        uint64_t tmp;
        /* CN3XXX: 64-bit lld/scd (load-linked/store-conditional) retry
        ** loop; 'scd' leaves 0 in tmp on failure, so 'beqz' retries. */
        __asm__ __volatile__(
        ".set noreorder          \n"
        "1: lld   %[tmp], %[val] \n"
        "   daddu %[tmp], %[inc] \n"
        "   scd   %[tmp], %[val] \n"
        "   beqz  %[tmp], 1b     \n"
        "   nop                  \n"
        ".set reorder            \n"
        : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
        : [inc] "r" (incr)
        : "memory");
    }
    else
    {
        /* Later Octeon models: 'saad' is the 64-bit Octeon-specific
        ** atomic add-to-memory instruction (see the Octeon ISA manual). */
        __asm__ __volatile__(
        "   saad %[inc], (%[base]) \n"
        : "+m" (*ptr)
        : [inc] "r" (incr), [base] "r" (ptr)
        : "memory");
    }
}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
static inline void cvmx_atomic_add64(int64_t *ptr, int64_t incr)
{
    /* Barriers on both sides enforce ordering, making this the variant
    ** that is safe for reference counting. */
    CVMX_SYNCWS;
    cvmx_atomic_add64_nosync(ptr, incr);
    CVMX_SYNCWS;
}
+
+/**
+ * Atomically sets a 64 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
static inline void cvmx_atomic_set64(int64_t *ptr, int64_t value)
{
    /* A 64-bit aligned store is naturally atomic; the barriers only
    ** enforce ordering relative to surrounding memory operations. */
    CVMX_SYNCWS;
    *ptr = value;
    CVMX_SYNCWS;
}
+
+/**
+ * Returns the current value of a 64 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
static inline int64_t cvmx_atomic_get64(int64_t *ptr)
{
    /* Read through a volatile-qualified pointer so the compiler emits a
    ** real load rather than reusing a cached value. */
    volatile int64_t *vptr = ptr;
    return *vptr;
}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old don't match, function returns failure immediately.
+ * If *ptr and old match, function spins until *ptr updated to new atomically, or
+ * until *ptr and old no longer match
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
static inline uint32_t cvmx_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
{
    uint32_t tmp, ret;

    /* ll/sc compare-and-store loop:
    ** - ret defaults to 0; if the loaded value != old_val, branch to 2f
    **   and return failure immediately.
    ** - The 'move' and the final 'li ret, 1' sit in branch delay slots:
    **   the new value is staged before 'sc', and ret is set to 1 whether
    **   or not 'sc' succeeded — but on sc failure the loop restarts at 1:
    **   and re-initializes ret, so only a successful store returns 1. */
    __asm__ __volatile__(
    ".set noreorder\n"
    "1: ll   %[tmp], %[val] \n"
    "   li   %[ret], 0     \n"
    "   bne  %[tmp], %[old], 2f \n"
    "   move %[tmp], %[new_val] \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   li   %[ret], 1    \n"
    "2: nop               \n"
    ".set reorder         \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [old] "r" (old_val), [new_val] "r" (new_val)
    : "memory");

    return(ret);

}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old don't match, function returns failure immediately.
+ * If *ptr and old match, function spins until *ptr updated to new atomically, or
+ * until *ptr and old no longer match
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
static inline uint32_t cvmx_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
{
    uint32_t ret;
    /* Barriers on both sides give this the acquire/release behavior
    ** needed when used as a locking primitive. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_compare_and_store32_nosync(ptr, old_val, new_val);
    CVMX_SYNCWS;
    return ret;


}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old don't match, function returns failure immediately.
+ * If *ptr and old match, function spins until *ptr updated to new atomically, or
+ * until *ptr and old no longer match
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
static inline uint64_t cvmx_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
{
    uint64_t tmp, ret;

    /* 64-bit lld/scd compare-and-store; same structure as the 32-bit
    ** version: mismatch with old_val exits to 2f with ret == 0, and a
    ** failed 'scd' (lost reservation) retries from 1:, re-clearing ret. */
    __asm__ __volatile__(
    ".set noreorder\n"
    "1: lld  %[tmp], %[val] \n"
    "   li   %[ret], 0     \n"
    "   bne  %[tmp], %[old], 2f \n"
    "   move %[tmp], %[new_val] \n"
    "   scd  %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   li   %[ret], 1    \n"
    "2: nop               \n"
    ".set reorder         \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [old] "r" (old_val), [new_val] "r" (new_val)
    : "memory");

    return(ret);

}
+
+/**
+ * Atomically compares the old value with the value at ptr, and if they match,
+ * stores new_val to ptr.
+ * If *ptr and old don't match, function returns failure immediately.
+ * If *ptr and old match, function spins until *ptr updated to new atomically, or
+ * until *ptr and old no longer match
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
static inline uint64_t cvmx_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
{
    uint64_t ret;
    /* Barriers on both sides give this the acquire/release behavior
    ** needed when used as a locking primitive. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_compare_and_store64_nosync(ptr, old_val, new_val);
    CVMX_SYNCWS;
    return ret;
}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr)
{
    uint64_t tmp, ret;

    /* lld/scd retry loop; the pre-add value is captured into ret inside
    ** the loop so a retry re-reads it, guaranteeing ret matches the
    ** value that the successful store was based on. */
    __asm__ __volatile__(
    ".set noreorder          \n"
    "1: lld   %[tmp], %[val] \n"
    "   move  %[ret], %[tmp] \n"
    "   daddu %[tmp], %[inc] \n"
    "   scd   %[tmp], %[val] \n"
    "   beqz  %[tmp], 1b     \n"
    "   nop                  \n"
    ".set reorder            \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [inc] "r" (incr)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int64_t cvmx_atomic_fetch_and_add64(int64_t *ptr, int64_t incr)
+{
+ uint64_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_fetch_and_add64_nosync(ptr, incr);
+ CVMX_SYNCWS;
+ return ret;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr)
{
    uint32_t tmp, ret;

    /* ll/sc retry loop; the pre-add value is captured into ret inside
    ** the loop so a retry re-reads it, guaranteeing ret matches the
    ** value that the successful store was based on. */
    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   move %[ret], %[tmp] \n"
    "   addu %[tmp], %[inc] \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [inc] "r" (incr)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
+{
+ uint32_t ret;
+ CVMX_SYNCWS;
+ ret = cvmx_atomic_fetch_and_add32_nosync(ptr, incr);
+ CVMX_SYNCWS;
+ return ret;
+}
+
+/**
+ * Atomically set bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
static inline uint64_t cvmx_atomic_fetch_and_bset64_nosync(uint64_t *ptr, uint64_t mask)
{
    uint64_t tmp, ret;

    /* lld/scd retry loop: capture the old value, OR in the mask, and
    ** conditionally store; retry from 1: if the reservation was lost. */
    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: lld  %[tmp], %[val] \n"
    "   move %[ret], %[tmp] \n"
    "   or   %[tmp], %[msk] \n"
    "   scd  %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (mask)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically set bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
static inline uint32_t cvmx_atomic_fetch_and_bset32_nosync(uint32_t *ptr, uint32_t mask)
{
    uint32_t tmp, ret;

    /* ll/sc retry loop: capture the old value, OR in the mask, and
    ** conditionally store; retry from 1: if the reservation was lost. */
    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   move %[ret], %[tmp] \n"
    "   or   %[tmp], %[msk] \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (mask)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically clear bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
static inline uint64_t cvmx_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64_t mask)
{
    uint64_t tmp, ret;
    /* Complement the mask in C instead of with the old in-asm
    ** "nor %[msk], 0": that instruction modified a register declared as
    ** an input-only ("r") operand, which GCC's extended-asm rules forbid
    ** (inputs must not be clobbered) and which could corrupt the
    ** caller's copy of mask if the compiler reused the register. */
    uint64_t not_mask = ~mask;

    /* lld/scd retry loop: capture the old value, AND with ~mask to
    ** clear the requested bits, and conditionally store. */
    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: lld  %[tmp], %[val] \n"
    "   move %[ret], %[tmp] \n"
    "   and  %[tmp], %[msk] \n"
    "   scd  %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (not_mask)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically clear bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
static inline uint32_t cvmx_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_t mask)
{
    uint32_t tmp, ret;
    /* Complement the mask in C instead of with the old in-asm
    ** "nor %[msk], 0": that instruction modified a register declared as
    ** an input-only ("r") operand, which GCC's extended-asm rules forbid
    ** (inputs must not be clobbered) and which could corrupt the
    ** caller's copy of mask if the compiler reused the register. */
    uint32_t not_mask = ~mask;

    /* ll/sc retry loop: capture the old value, AND with ~mask to clear
    ** the requested bits, and conditionally store. */
    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   move %[ret], %[tmp] \n"
    "   and  %[tmp], %[msk] \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (not_mask)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically swaps value in 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param new_val new value to write
+ *
+ * @return Value of memory location before swap operation
+ */
static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val)
{
    uint64_t tmp, ret;

    /* lld/scd retry loop: the old value loads straight into ret, the new
    ** value is staged into tmp, and a failed conditional store (tmp == 0)
    ** retries — re-loading ret so it always matches the swapped-out value. */
    __asm__ __volatile__(
    ".set noreorder          \n"
    "1: lld  %[ret], %[val]  \n"
    "   move %[tmp], %[new_val] \n"
    "   scd  %[tmp], %[val]  \n"
    "   beqz %[tmp],  1b     \n"
    "   nop                  \n"
    ".set reorder            \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [new_val] "r" (new_val)
    : "memory");

    return (ret);
}
+
+/**
+ * Atomically swaps value in 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param new_val new value to write
+ *
+ * @return Value of memory location before swap operation
+ */
static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val)
{
    uint32_t tmp, ret;

    /* ll/sc retry loop: the old value loads straight into ret, the new
    ** value is staged into tmp, and a failed conditional store (tmp == 0)
    ** retries — re-loading ret so it always matches the swapped-out value. */
    __asm__ __volatile__(
    ".set noreorder          \n"
    "1: ll   %[ret], %[val]  \n"
    "   move %[tmp], %[new_val] \n"
    "   sc   %[tmp], %[val]  \n"
    "   beqz %[tmp],  1b     \n"
    "   nop                  \n"
    ".set reorder            \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [new_val] "r" (new_val)
    : "memory");

    return (ret);
}
+
+/**
+ * This atomic operation is now named cvmx_atomic_compare_and_store32_nosync
+ * and the (deprecated) macro is provided for backward compatibility.
+ * @deprecated
+ */
+#define cvmx_atomic_compare_and_store_nosync32 cvmx_atomic_compare_and_store32_nosync
+
+/**
+ * This atomic operation is now named cvmx_atomic_compare_and_store64_nosync
+ * and the (deprecated) macro is provided for backward compatibility.
+ * @deprecated
+ */
+#define cvmx_atomic_compare_and_store_nosync64 cvmx_atomic_compare_and_store64_nosync
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_ATOMIC_H__ */
diff --git a/cvmx-bootloader.h b/cvmx-bootloader.h
new file mode 100644
index 000000000000..c1097aa33991
--- /dev/null
+++ b/cvmx-bootloader.h
@@ -0,0 +1,147 @@
+/***********************license start***************
+ * Copyright (c) 2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+#ifndef __CVMX_BOOTLOADER__
+#define __CVMX_BOOTLOADER__
+
+
+
+/**
+ * @file
+ *
+ * Bootloader definitions that are shared with other programs
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+
+/* The bootloader_header_t structure defines the header that is present
+** at the start of binary u-boot images. This header is used to locate the bootloader
+** image in NAND, and also to allow verification of images for normal NOR booting.
+** This structure is placed at the beginning of a bootloader binary image, and remains
+** in the executable code.
+*/
+#define BOOTLOADER_HEADER_MAGIC 0x424f4f54 /* "BOOT" in ASCII */
+
+#define BOOTLOADER_HEADER_COMMENT_LEN 64
+#define BOOTLOADER_HEADER_VERSION_LEN 64
+#define BOOTLOADER_HEADER_MAX_SIZE 0x200 /* limited by the space to the next exception handler */
+
+#define BOOTLOADER_HEADER_CURRENT_MAJOR_REV 1
+#define BOOTLOADER_HEADER_CURRENT_MINOR_REV 1
+
+/* offsets to struct bootloader_header fields for assembly use */
+#define MAGIC_OFFST 8
+#define HCRC_OFFST 12
+#define HLEN_OFFST 16
+#define DLEN_OFFST 24
+#define DCRC_OFFST 28
+#define GOT_OFFST 48
+
+#define LOOKUP_STEP 8192
+
+#ifndef __ASSEMBLY__
typedef struct bootloader_header
{
    uint32_t jump_instr;      /* Jump to executable code following the
                              ** header. This allows this header to be
                              ** (and remain) part of the executable image. */
    uint32_t nop_instr;       /* Must be 0x0 */
    uint32_t magic;           /* Magic number to identify header
                              ** (BOOTLOADER_HEADER_MAGIC) */
    uint32_t hcrc;            /* CRC of all of header excluding this field */

    uint16_t hlen;            /* Length of header in bytes */
    uint16_t maj_rev;         /* Major revision */
    uint16_t min_rev;         /* Minor revision */
    uint16_t board_type;      /* Board type that the image is for */

    uint32_t dlen;            /* Length of data (immediately following header) in bytes */
    uint32_t dcrc;            /* CRC of data */
    uint64_t address;         /* MIPS virtual address */
    uint32_t flags;           /* Image flags (see BL_HEADER_FLAG_* defines) */
    uint16_t image_type;      /* Defined in bootloader_image_t enum */
    uint16_t resv0;           /* pad */

    /* The next 4 fields are filled in at compile time, not by the utility */
    uint32_t got_address;     /* compiled GOT address position in the image */
    uint32_t got_num_entries; /* number of GOT entries */
    uint32_t compiled_start;  /* compiled start-of-image address */
    uint32_t image_start;     /* relocated start-of-image address */

    char comment_string[BOOTLOADER_HEADER_COMMENT_LEN]; /* Optional, for descriptive purposes */
    char version_string[BOOTLOADER_HEADER_VERSION_LEN]; /* Optional, for descriptive purposes */
} __attribute__((packed)) bootloader_header_t;
+
+
+
+/* Defines for flag field */
+#define BL_HEADER_FLAG_FAILSAFE (1)
+
+
typedef enum
{
    BL_HEADER_IMAGE_UKNOWN = 0x0,  /* Unknown/invalid image type
                                   ** (identifier keeps its historical
                                   ** spelling for ABI compatibility) */
    BL_HEADER_IMAGE_STAGE2,        /* Binary bootloader stage2 image (NAND boot) */
    BL_HEADER_IMAGE_STAGE3,        /* Binary bootloader stage3 image (NAND boot) */
    BL_HEADER_IMAGE_NOR,           /* Binary bootloader for NOR boot */
    BL_HEADER_IMAGE_PCIBOOT,       /* Binary bootloader for PCI boot */
    BL_HEADER_IMAGE_UBOOT_ENV,     /* Environment for u-boot */
    BL_HEADER_IMAGE_MAX,           /* One past the last standard image type */
    /* Range for customer private use. Will not be used by Cavium Networks */
    BL_HEADER_IMAGE_CUST_RESERVED_MIN = 0x1000,
    BL_HEADER_IMAGE_CUST_RESERVED_MAX = 0x1fff,
} bootloader_image_t;
+
+#endif /* __ASSEMBLY__ */
+
+/* Maximum address searched for NAND boot images and environments. This is used
+** by stage1 and stage2. */
+#define MAX_NAND_SEARCH_ADDR 0x400000
+
+
+/* Defines for RAM based environment set by the host or the previous bootloader
+** in a chain boot configuration. */
+
+#define U_BOOT_RAM_ENV_ADDR (0x1000)
+#define U_BOOT_RAM_ENV_SIZE (0x1000)
+#define U_BOOT_RAM_ENV_CRC_SIZE (0x4)
+
+#endif /* __CVMX_BOOTLOADER__ */
diff --git a/cvmx-bootmem.c b/cvmx-bootmem.c
new file mode 100644
index 000000000000..55aea601b70d
--- /dev/null
+++ b/cvmx-bootmem.c
@@ -0,0 +1,952 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+/**
+ * @file
+ * Simple allocate only memory allocator. Used to allocate memory at application
+ * start time.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-bootmem.h"
+
+
+//#define DEBUG
+
+
+#undef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+#undef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define ALIGN_ADDR_UP(addr, align) (((addr) + (~(align))) & (align))
+
+static CVMX_SHARED cvmx_bootmem_desc_t *cvmx_bootmem_desc = NULL;
+
+/* See header file for descriptions of functions */
+
/* Wrapper functions are provided for reading/writing the size and next block
** values as these may not be directly addressable (in 32-bit applications, for instance).
*/
+/* Offsets of data elements in bootmem list, must match cvmx_bootmem_block_header_t */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+ return(cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63)));
+}
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+ return(cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)));
+}
+
+
+/* This functions takes an address range and adjusts it as necessary to
+** match the ABI that is currently being used. This is required to ensure
+** that bootmem_alloc* functions only return valid pointers for 32 bit ABIs */
/* Clamp [*min_addr_ptr, *max_addr_ptr] to the range addressable under the
** current ABI, so that bootmem_alloc* only returns pointers that 32-bit
** code can actually dereference.  A *max_addr_ptr of 0 means "unbounded"
** and is replaced with the ABI's maximum.  Always returns 0. */
static int __cvmx_validate_mem_range(uint64_t *min_addr_ptr, uint64_t *max_addr_ptr)
{

#if defined(__linux__) && defined(CVMX_ABI_N32)
    {
        extern uint64_t linux_mem32_min;
        extern uint64_t linux_mem32_max;
        /* For 32 bit Linux apps, we need to restrict the allocations to the range
        ** of memory configured for access from userspace. Also, we need to add mappings
        ** for the data structures that we access.*/

        /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
        ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
        ** If max unspecified, set to 32 bit maximum. */
        *min_addr_ptr = MIN(MAX(*min_addr_ptr, linux_mem32_min), linux_mem32_max);
        if (!*max_addr_ptr)
            *max_addr_ptr = linux_mem32_max;
        else
            *max_addr_ptr = MAX(MIN(*max_addr_ptr, linux_mem32_max), linux_mem32_min);
    }
#elif defined(CVMX_ABI_N32)
    {
        uint32_t max_phys = 0x0FFFFFFF;  /* Max physical address when 1-1 mappings not used */
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
        max_phys = 0x7FFFFFFF;
#endif
        /* We are running standalone simple executive, so we need to limit the range
        ** that we allocate from */

        /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
        ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
        ** If max unspecified, set to 32 bit maximum. */
        *min_addr_ptr = MIN(MAX(*min_addr_ptr, 0x0), max_phys);
        if (!*max_addr_ptr)
            *max_addr_ptr = max_phys;
        else
            *max_addr_ptr = MAX(MIN(*max_addr_ptr, max_phys), 0x0);
    }
#endif
    /* 64-bit ABIs: no clamping needed, the full range is addressable. */

    return 0;
}
+
+
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr)
+{
+ int64_t address;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
/* Allocate 'size' bytes at exactly 'address' by collapsing the search
** range to [address, address + size). NULL on failure. */
void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment)
{
    uint64_t range_end = address + size;
    return cvmx_bootmem_alloc_range(size, alignment, address, range_end);
}
+
+
/* Allocate 'size' bytes with the given alignment anywhere in memory
** (min and max of 0 mean "no address constraint"). NULL on failure. */
void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
{
    const uint64_t any_min = 0, any_max = 0;
    return cvmx_bootmem_alloc_range(size, alignment, any_min, any_max);
}
+
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name)
+{
+ int64_t addr;
+
+ __cvmx_validate_mem_range(&min_addr, &max_addr);
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, 0);
+ if (addr >= 0)
+ return cvmx_phys_to_ptr(addr);
+ else
+ return NULL;
+
+}
/* Allocate a named block at exactly 'address' by collapsing the search
** range to [address, address + size); alignment is unconstrained (0). */
void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, char *name)
{
    uint64_t range_end = address + size;
    return cvmx_bootmem_alloc_named_range(size, address, range_end, 0, name);
}
/* Allocate a named block of 'size' bytes with the given alignment from
** anywhere in memory (min/max of 0 = unconstrained). NULL on failure. */
void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
{
    const uint64_t any_min = 0, any_max = 0;
    return cvmx_bootmem_alloc_named_range(size, any_min, any_max, alignment, name);
}
+
/* Free a previously allocated named block. Forwards to the physical
** layer with default flags (0); returns its status code. */
int cvmx_bootmem_free_named(char *name)
{
    return cvmx_bootmem_phy_named_block_free(name, 0);
}
+
+cvmx_bootmem_named_block_desc_t * cvmx_bootmem_find_named_block(char *name)
+{
+ return(cvmx_bootmem_phy_named_block_find(name, 0));
+}
+
/* Print the table of named bootmem blocks (delegates to the physical
** layer's printer). */
void cvmx_bootmem_print_named(void)
{
    cvmx_bootmem_phy_named_block_print();
}
+
+#if defined(__linux__) && defined(CVMX_ABI_N32)
+/* For N32 Linux apps the named block array must be reached through a private
+** mmap()ed mapping; this holds the process-local pointer to that mapping. */
+cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
+#endif
+
+/**
+ * Initialize the bootmem allocator by setting up the global pointer to the
+ * shared bootmem descriptor passed in by the bootloader.
+ *
+ * @param mem_desc_ptr Address of the bootmem descriptor (physical for the
+ *                     simple executive, mapped via /dev/mem for N32 Linux)
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+ /* Verify that the size of cvmx_spinlock_t meets our assumptions */
+ if (sizeof(cvmx_spinlock_t) != 4)
+ {
+ cvmx_dprintf("ERROR: Unexpected size of cvmx_spinlock_t\n");
+ return(-1);
+ }
+
+ /* Here we set the global pointer to the bootmem descriptor block. This pointer will
+ ** be used directly, so we will set it up to be directly usable by the application.
+ ** It is set up as follows for the various runtime/ABI combinations:
+ ** Linux 64 bit: Set XKPHYS bit
+ ** Linux 32 bit: use mmap to create mapping, use virtual address
+ ** CVMX 64 bit: use physical address directly
+ ** CVMX 32 bit: use physical address directly
+ ** Note that the CVMX environment assumes the use of 1-1 TLB mappings so that the physical addresses
+ ** can be used directly
+ */
+ if (!cvmx_bootmem_desc)
+ {
+#if defined(CVMX_BUILD_FOR_LINUX_USER) && defined(CVMX_ABI_N32)
+ void *base_ptr;
+ /* For 32 bit, we need to use mmap to create a mapping for the bootmem descriptor */
+ int dm_fd = open("/dev/mem", O_RDWR);
+ if (dm_fd < 0)
+ {
+ cvmx_dprintf("ERROR opening /dev/mem for boot descriptor mapping\n");
+ return(-1);
+ }
+
+ /* Map a page-aligned window that covers the descriptor; one extra page
+ ** is requested since the descriptor may straddle a page boundary. */
+ base_ptr = mmap(NULL,
+ sizeof(cvmx_bootmem_desc_t) + sysconf(_SC_PAGESIZE),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ dm_fd,
+ ((off_t)mem_desc_ptr) & ~(sysconf(_SC_PAGESIZE) - 1));
+
+ if (MAP_FAILED == base_ptr)
+ {
+ cvmx_dprintf("Error mapping bootmem descriptor!\n");
+ close(dm_fd);
+ return(-1);
+ }
+
+ /* Adjust pointer to point to bootmem_descriptor, rather than start of page it is in */
+ cvmx_bootmem_desc = (cvmx_bootmem_desc_t*)((char*)base_ptr + (((off_t)mem_desc_ptr) & (sysconf(_SC_PAGESIZE) - 1)));
+
+ /* Also setup mapping for named memory block desc. while we are at it. Here we must keep another
+ ** pointer around, as the value in the bootmem descriptor is shared with other applications. */
+ base_ptr = mmap(NULL,
+ sizeof(cvmx_bootmem_named_block_desc_t) * cvmx_bootmem_desc->named_block_num_blocks + sysconf(_SC_PAGESIZE),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ dm_fd,
+ ((off_t)cvmx_bootmem_desc->named_block_array_addr) & ~(sysconf(_SC_PAGESIZE) - 1));
+
+ /* fd no longer needed once both mappings exist (mappings survive close) */
+ close(dm_fd);
+
+ if (MAP_FAILED == base_ptr)
+ {
+ cvmx_dprintf("Error mapping named block descriptor!\n");
+ return(-1);
+ }
+
+ /* Adjust pointer to point to named block array, rather than start of page it is in */
+ linux32_named_block_array_ptr = (cvmx_bootmem_named_block_desc_t*)((char*)base_ptr + (((off_t)cvmx_bootmem_desc->named_block_array_addr) & (sysconf(_SC_PAGESIZE) - 1)));
+
+#elif (defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CVMX_BUILD_FOR_LINUX_USER)) && defined(CVMX_ABI_64)
+ /* Set XKPHYS bit */
+ cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+ /* Simple executive: 1-1 TLB mappings let us use the physical address as-is */
+ cvmx_bootmem_desc = (cvmx_bootmem_desc_t*)mem_desc_ptr;
+#endif
+ }
+
+
+ return(0);
+}
+
+
+/* Return the total bytes available in free blocks of at least min_block_size. */
+uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size)
+{
+ return(cvmx_bootmem_phy_available_mem(min_block_size));
+}
+
+
+
+
+
+/*********************************************************************
+** The cvmx_bootmem_phy* functions below return 64 bit physical addresses,
+** and expose more features than the cvmx_bootmem functions above. These are
+** required for full memory space access in 32 bit applications, as well as for
+** using some advanced features.
+** Most applications should not need to use these.
+**
+**/
+
+
+/**
+ * Allocate a block of physical memory from the free list using first fit.
+ *
+ * @param req_size    Bytes requested; rounded up to a multiple of
+ *                    CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * @param address_min Minimum physical address the block may start at
+ * @param address_max Maximum physical address; 0 with a non-zero min means
+ *                    "exact block", 0/0 means "no limit"
+ * @param alignment   Required alignment (power of 2), raised to at least
+ *                    CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * @param flags       CVMX_BOOTMEM_FLAG_NO_LOCKING and/or
+ *                    CVMX_BOOTMEM_FLAG_END_ALLOC
+ *
+ * @return physical address of the allocated block, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags)
+{
+
+ uint64_t head_addr;
+ uint64_t ent_addr;
+ uint64_t prev_addr = 0; /* points to previous list entry, 0 (NULL) if current entry is head of list */
+ uint64_t new_ent_addr = 0;
+ uint64_t desired_min_addr;
+ uint64_t alignment_mask = ~(alignment - 1);
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ (unsigned long long)req_size, (unsigned long long)address_min, (unsigned long long)address_max, (unsigned long long)alignment);
+#endif
+
+ /* NOTE(review): goto error_out from here (and the argument checks below)
+ ** runs before the lock is taken, yet error_out unconditionally unlocks
+ ** unless NO_LOCKING is set. This is only safe if unlocking an unheld
+ ** cvmx_spinlock_t is benign -- verify against the spinlock implementation. */
+ if (cvmx_bootmem_desc->major_version > 3)
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ goto error_out;
+ }
+
+ /* Do a variety of checks to validate the arguments. The allocator code will later assume
+ ** that these checks have been made. We validate that the requested constraints are not
+ ** self-contradictory before we look through the list of available memory
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ goto error_out;
+
+ /* Round req_size up to mult of minimum alignment bytes */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ /* Convert !0 address_min and 0 address_max to special case of range that specifies an exact
+ ** memory block to allocate. Do this before other checks and adjustments so that this transformation will be validated */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max limits */
+
+
+
+
+ /* Enforce minimum alignment (this also keeps the minimum free block
+ ** req_size the same as the alignment req_size */
+ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+ {
+ alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+ }
+ alignment_mask = ~(alignment - 1);
+
+ /* Adjust address minimum based on requested alignment (round up to meet alignment). Do this here so we can
+ ** reject impossible requests up front. (NOP for address_min == 0) */
+ if (alignment)
+ address_min = (address_min + (alignment - 1)) & ~(alignment - 1);
+
+
+ /* Reject inconsistent args. We have adjusted these, so this may fail due to our internal changes
+ ** even if this check would pass for the values the user supplied. */
+ if (req_size > address_max - address_min)
+ goto error_out;
+
+ /* Walk through the list entries - first fit found is returned */
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ head_addr = cvmx_bootmem_desc->head_addr;
+ ent_addr = head_addr;
+ while (ent_addr)
+ {
+ uint64_t usable_base, usable_max;
+ uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+ /* Sanity check: the free list must be sorted by address */
+ if (cvmx_bootmem_phy_get_next(ent_addr) && ent_addr > cvmx_bootmem_phy_get_next(ent_addr))
+ {
+ cvmx_dprintf("Internal bootmem_alloc() error: ent: 0x%llx, next: 0x%llx\n",
+ (unsigned long long)ent_addr, (unsigned long long)cvmx_bootmem_phy_get_next(ent_addr));
+ goto error_out;
+ }
+
+ /* Determine if this is an entry that can satisfy the request */
+ /* Check to make sure entry is large enough to satisfy request */
+ usable_base = ALIGN_ADDR_UP(MAX(address_min, ent_addr), alignment_mask);
+ usable_max = MIN(address_max, ent_addr + ent_size);
+ /* We should be able to allocate block at address usable_base */
+
+ desired_min_addr = usable_base;
+
+ /* Determine if request can be satisfied from the current entry */
+ if ((((ent_addr + ent_size) > usable_base && ent_addr < address_max))
+ && req_size <= usable_max - usable_base)
+ {
+ /* We have found an entry that has room to satisfy the request, so allocate it from this entry */
+
+ /* If end CVMX_BOOTMEM_FLAG_END_ALLOC set, then allocate from the end of this block
+ ** rather than the beginning */
+ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC)
+ {
+ desired_min_addr = usable_max - req_size;
+ /* Align desired address down to required alignment */
+ desired_min_addr &= alignment_mask;
+ }
+
+ /* Match at start of entry */
+ if (desired_min_addr == ent_addr)
+ {
+ if (req_size < ent_size)
+ {
+ /* big enough to create a new block from top portion of block */
+ new_ent_addr = ent_addr + req_size;
+ cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, ent_size - req_size);
+
+ /* Adjust next pointer as following code uses this */
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ }
+
+ /* adjust prev ptr or head to remove this entry from list */
+ if (prev_addr)
+ {
+ cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ }
+ else
+ {
+ /* head of list being returned, so update head ptr */
+ cvmx_bootmem_desc->head_addr = cvmx_bootmem_phy_get_next(ent_addr);
+ }
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return(desired_min_addr);
+ }
+
+
+ /* block returned doesn't start at beginning of entry, so we know
+ ** that we will be splitting a block off the front of this one. Create a new block
+ ** from the beginning, add to list, and go to top of loop again.
+ **
+ ** create new block from high portion of block, so that top block
+ ** starts at desired addr
+ **/
+ new_ent_addr = desired_min_addr;
+ cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr, cvmx_bootmem_phy_get_size(ent_addr) - (desired_min_addr - ent_addr));
+ cvmx_bootmem_phy_set_size(ent_addr, desired_min_addr - ent_addr);
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ /* Loop again to handle actual alloc from new block */
+ }
+
+ prev_addr = ent_addr;
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr);
+ }
+error_out:
+ /* We didn't find anything, so return error */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return(-1);
+}
+
+
+
+/**
+ * Return a block of physical memory to the address-sorted free list,
+ * coalescing with the previous and/or next free block when they are
+ * physically adjacent.
+ *
+ * @param phy_addr Physical start address of the block being freed
+ * @param size     Size of the block in bytes (0 is rejected)
+ * @param flags    CVMX_BOOTMEM_FLAG_NO_LOCKING to skip taking the lock
+ *
+ * @return 1 on success, 0 on failure (bad args or overlapping block)
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+ uint64_t cur_addr;
+ uint64_t prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+ if (cvmx_bootmem_desc->major_version > 3)
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ return(0);
+ }
+
+ /* 0 is not a valid size for this allocator */
+ if (!size)
+ return(0);
+
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ cur_addr = cvmx_bootmem_desc->head_addr;
+ if (cur_addr == 0 || phy_addr < cur_addr)
+ {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr)
+ {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+
+ }
+ else
+ {
+ /* New block before first block */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr); /* OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr)
+ {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr)
+ {
+ /* We have reached the end of the list, add on to end, checking
+ ** to see if we need to combine with last block
+ **/
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
+ {
+ /* Adjacent to the last block: just grow it */
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
+ }
+ else
+ {
+ /* New last block; next pointer of 0 terminates the list */
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+ else
+ {
+ /* insert between prev and cur nodes, checking for merge with either/both */
+
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
+ {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
+ if (phy_addr + size == cur_addr)
+ {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(cur_addr) + cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+ else if (phy_addr + size == cur_addr)
+ {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+
+ }
+ retval = 1;
+
+bootmem_free_done:
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return(retval);
+
+}
+
+
+
+/* Dump the entire free-block list (address, size, next) for debugging.
+** Does not take the bootmem lock, so output may be inconsistent if the
+** list is being modified concurrently. */
+void cvmx_bootmem_phy_list_print(void)
+{
+ uint64_t addr;
+
+ addr = cvmx_bootmem_desc->head_addr;
+ cvmx_dprintf("\n\n\nPrinting bootmem block list, descriptor: %p, head is 0x%llx\n",
+ cvmx_bootmem_desc, (unsigned long long)addr);
+ cvmx_dprintf("Descriptor version: %d.%d\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version);
+ if (cvmx_bootmem_desc->major_version > 3)
+ {
+ cvmx_dprintf("Warning: Bootmem descriptor version is newer than expected\n");
+ }
+ if (!addr)
+ {
+ cvmx_dprintf("mem list is empty!\n");
+ }
+ while (addr)
+ {
+ /* NOTE(review): '%qx' is a BSD/obsolete length modifier for long long;
+ ** assumes the cvmx_dprintf backend supports it -- verify on all targets. */
+ cvmx_dprintf("Block address: 0x%08qx, size: 0x%08qx, next: 0x%08qx\n",
+ (unsigned long long)addr,
+ (unsigned long long)cvmx_bootmem_phy_get_size(addr),
+ (unsigned long long)cvmx_bootmem_phy_get_next(addr));
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ cvmx_dprintf("\n\n");
+
+}
+
+
+/* Sum the sizes of all free blocks that are at least min_block_size bytes.
+** Takes the bootmem lock for the duration of the walk. */
+uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size)
+{
+ uint64_t addr;
+
+ uint64_t available_mem = 0;
+
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ addr = cvmx_bootmem_desc->head_addr;
+ while (addr)
+ {
+ if (cvmx_bootmem_phy_get_size(addr) >= min_block_size)
+ available_mem += cvmx_bootmem_phy_get_size(addr);
+ addr = cvmx_bootmem_phy_get_next(addr);
+ }
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return(available_mem);
+
+}
+
+
+
+/**
+ * Find a named block descriptor by name.
+ *
+ * @param name  Name to search for; if NULL, the first UNUSED descriptor
+ *              (size == 0) is returned instead -- callers use this to find
+ *              a free slot when creating a new named block.
+ * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING to skip taking the lock
+ *
+ * @return pointer into the named block array, or NULL if not found
+ */
+cvmx_bootmem_named_block_desc_t * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+ unsigned int i;
+ cvmx_bootmem_named_block_desc_t *named_block_array_ptr;
+
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+ /* Lock the structure to make sure that it is not being changed while we are
+ ** examining it.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+#if defined(__linux__) && !defined(CONFIG_OCTEON_U_BOOT)
+#ifdef CVMX_ABI_N32
+ /* Need to use mmapped named block pointer in 32 bit linux apps */
+extern cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
+ named_block_array_ptr = linux32_named_block_array_ptr;
+#else
+ /* Use XKPHYS for 64 bit linux */
+ named_block_array_ptr = (cvmx_bootmem_named_block_desc_t *)cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+#endif
+#else
+ /* Simple executive case. (and u-boot)
+ ** This could be in the low 1 meg of memory that is not 1-1 mapped, so we need to use XKPHYS/KSEG0 addressing for it */
+ named_block_array_ptr = CASTPTR(cvmx_bootmem_named_block_desc_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,cvmx_bootmem_desc->named_block_array_addr));
+#endif
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n", named_block_array_ptr);
+#endif
+ if (cvmx_bootmem_desc->major_version == 3)
+ {
+ for (i = 0; i < cvmx_bootmem_desc->named_block_num_blocks; i++)
+ {
+ /* Match either the requested name (in-use entry) or, for a NULL
+ ** name, any unused entry (size == 0) */
+ if ((name && named_block_array_ptr[i].size && !strncmp(name, named_block_array_ptr[i].name, cvmx_bootmem_desc->named_block_name_len - 1))
+ || (!name && !named_block_array_ptr[i].size))
+ {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ return(&(named_block_array_ptr[i]));
+ }
+ }
+ }
+ else
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ }
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ return(NULL);
+}
+
+/**
+ * Free a named block: return its memory to the free list and mark its
+ * descriptor slot unused.
+ *
+ * @param name  Name of the block to free
+ * @param flags Currently unused here; the lock is always taken because the
+ *              lookup/free/release sequence must be atomic
+ *
+ * @return 1 on success, 0 on failure (not found or bad descriptor version)
+ */
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+ cvmx_bootmem_named_block_desc_t *named_block_ptr;
+
+ if (cvmx_bootmem_desc->major_version != 3)
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ return(0);
+ }
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+ /* Take lock here, as name lookup/block free/name free need to be atomic */
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ named_block_ptr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_ptr)
+ {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s, base: 0x%llx, size: 0x%llx\n", name, (unsigned long long)named_block_ptr->base_addr, (unsigned long long)named_block_ptr->size);
+#endif
+ __cvmx_bootmem_phy_free(named_block_ptr->base_addr, named_block_ptr->size, CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ named_block_ptr->size = 0;
+ /* Set size to zero to indicate block not used. */
+ }
+
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ return(!!named_block_ptr); /* 0 on failure, 1 on success */
+}
+
+
+
+
+
+/**
+ * Allocate a physical block and register it in the named block table.
+ * The name lookup, allocation, and name registration are performed
+ * atomically under the bootmem lock.
+ *
+ * @param size      Bytes requested; rounded up to CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * @param min_addr  Minimum physical address of allowed range
+ * @param max_addr  Maximum physical address of allowed range
+ * @param alignment Required alignment (power of 2)
+ * @param name      Name to register; must be unique among live blocks
+ * @param flags     CVMX_BOOTMEM_FLAG_* flags, passed through to the allocator
+ *
+ * @return physical address of the block, or -1 on failure (name in use,
+ *         no free descriptor slot, or allocation failed)
+ */
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name, uint32_t flags)
+{
+ int64_t addr_allocated;
+ cvmx_bootmem_named_block_desc_t *named_block_desc_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: 0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+ (unsigned long long)size,
+ (unsigned long long)min_addr,
+ (unsigned long long)max_addr,
+ (unsigned long long)alignment,
+ name);
+#endif
+ if (cvmx_bootmem_desc->major_version != 3)
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ return(-1);
+ }
+
+
+ /* Take lock here, as name lookup/block alloc/name add need to be atomic */
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_ptr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ /* Check to see if name already in use, return error if name
+ ** not available or no more room for blocks.
+ */
+ if (cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr)
+ {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return(-1);
+ }
+
+
+ /* Round size up to mult of minimum alignment bytes
+ ** We need the actual size allocated to allow for blocks to be coalesced
+ ** when they are freed. The alloc routine does the same rounding up
+ ** on all allocations. */
+ size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (addr_allocated >= 0)
+ {
+ /* Record the block in the descriptor slot; force NUL termination
+ ** since strncpy does not guarantee it */
+ named_block_desc_ptr->base_addr = addr_allocated;
+ named_block_desc_ptr->size = size;
+ strncpy(named_block_desc_ptr->name, name, cvmx_bootmem_desc->named_block_name_len);
+ named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+ }
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ return(addr_allocated);
+}
+
+
+
+
+/* Print all in-use named block descriptors (name, base, size, index).
+** Entries with size == 0 are unused slots and are skipped. */
+void cvmx_bootmem_phy_named_block_print(void)
+{
+ unsigned int i;
+ int printed = 0;
+
+#if defined(__linux__) && !defined(CONFIG_OCTEON_U_BOOT)
+#ifdef CVMX_ABI_N32
+ /* Need to use mmapped named block pointer in 32 bit linux apps */
+extern cvmx_bootmem_named_block_desc_t *linux32_named_block_array_ptr;
+ cvmx_bootmem_named_block_desc_t *named_block_array_ptr = linux32_named_block_array_ptr;
+#else
+ /* Use XKPHYS for 64 bit linux */
+ cvmx_bootmem_named_block_desc_t *named_block_array_ptr = (cvmx_bootmem_named_block_desc_t *)cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+#endif
+#else
+ /* Simple executive case. (and u-boot)
+ ** This could be in the low 1 meg of memory that is not 1-1 mapped, so we need to use XKPHYS/KSEG0 addressing for it */
+ cvmx_bootmem_named_block_desc_t *named_block_array_ptr = CASTPTR(cvmx_bootmem_named_block_desc_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,cvmx_bootmem_desc->named_block_array_addr));
+#endif
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_print, desc addr: %p\n", cvmx_bootmem_desc);
+#endif
+ if (cvmx_bootmem_desc->major_version != 3)
+ {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc);
+ return;
+ }
+ cvmx_dprintf("List of currently allocated named bootmem blocks:\n");
+ for (i = 0; i < cvmx_bootmem_desc->named_block_num_blocks; i++)
+ {
+ if (named_block_array_ptr[i].size)
+ {
+ printed++;
+ cvmx_dprintf("Name: %s, address: 0x%08qx, size: 0x%08qx, index: %d\n",
+ named_block_array_ptr[i].name,
+ (unsigned long long)named_block_array_ptr[i].base_addr,
+ (unsigned long long)named_block_array_ptr[i].size,
+ i);
+
+ }
+ }
+ if (!printed)
+ {
+ cvmx_dprintf("No named bootmem blocks exist.\n");
+ }
+
+}
+
+
+/* Real physical addresses of memory regions */
+#define OCTEON_DDR0_BASE (0x0ULL)
+#define OCTEON_DDR0_SIZE (0x010000000ULL)
+#define OCTEON_DDR1_BASE (0x410000000ULL)
+#define OCTEON_DDR1_SIZE (0x010000000ULL)
+#define OCTEON_DDR2_BASE (0x020000000ULL)
+#define OCTEON_DDR2_SIZE (0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
+/**
+ * Build the initial bootmem free list from the Octeon DDR memory regions
+ * and allocate the named block descriptor array.
+ *
+ * @param mem_size           Total memory size in bytes (capped at
+ *                           OCTEON_MAX_PHY_MEM_SIZE)
+ * @param low_reserved_bytes Bytes reserved at the bottom of DDR0 (exception
+ *                           vectors, global descriptor, etc.)
+ * @param desc_buffer        Buffer for the bootmem descriptor; must be in
+ *                           32-bit addressable space
+ *
+ * @return 1 on success, 0 on failure
+ */
+int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer)
+{
+ uint64_t cur_block_addr;
+ int64_t addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init (arg desc ptr: %p, cvmx_bootmem_desc: %p)\n", desc_buffer, cvmx_bootmem_desc);
+#endif
+
+ /* Descriptor buffer needs to be in 32 bit addressable space to be compatible with
+ ** 32 bit applications */
+ if (!desc_buffer)
+ {
+ cvmx_dprintf("ERROR: no memory for cvmx_bootmem descriptor provided\n");
+ return 0;
+ }
+
+ if (mem_size > OCTEON_MAX_PHY_MEM_SIZE)
+ {
+ mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+ cvmx_dprintf("ERROR: requested memory size too large, truncating to maximum size\n");
+ }
+
+ /* Already initialized -- nothing to do */
+ if (cvmx_bootmem_desc)
+ return 1;
+
+ /* Initialize cvmx pointer to descriptor */
+ cvmx_bootmem_init(desc_buffer);
+
+ /* Set up global pointer to start of list, exclude low 64k for exception vectors, space for global descriptor */
+ memset(cvmx_bootmem_desc, 0x0, sizeof(cvmx_bootmem_desc_t));
+ /* Set version of bootmem descriptor */
+ cvmx_bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
+ cvmx_bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
+
+ /* cur_block_addr keeps the first usable address; head_addr is then reset
+ ** to 0 (empty list) and the free list is rebuilt by the
+ ** __cvmx_bootmem_phy_free() calls below */
+ cur_block_addr = cvmx_bootmem_desc->head_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);
+
+ cvmx_bootmem_desc->head_addr = 0;
+
+ if (mem_size <= OCTEON_DDR0_SIZE)
+ {
+ __cvmx_bootmem_phy_free(cur_block_addr, mem_size - low_reserved_bytes, 0);
+ goto frees_done;
+ }
+
+ __cvmx_bootmem_phy_free(cur_block_addr, OCTEON_DDR0_SIZE - low_reserved_bytes, 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+ /* Add DDR2 block next if present */
+ if (mem_size > OCTEON_DDR1_SIZE)
+ {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE, mem_size - OCTEON_DDR1_SIZE, 0);
+ }
+ else
+ {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+
+ }
+frees_done:
+
+ /* Initialize the named block structure */
+ cvmx_bootmem_desc->named_block_name_len = CVMX_BOOTMEM_NAME_LEN;
+ cvmx_bootmem_desc->named_block_num_blocks = CVMX_BOOTMEM_NUM_NAMED_BLOCKS;
+ cvmx_bootmem_desc->named_block_array_addr = 0;
+
+ /* Allocate this near the top of the low 256 MBytes of memory */
+ addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t),0, 0x10000000, 0 ,CVMX_BOOTMEM_FLAG_END_ALLOC);
+ if (addr >= 0)
+ cvmx_bootmem_desc->named_block_array_addr = addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_mem_list_init: named_block_array_addr: 0x%llx)\n", (unsigned long long)cvmx_bootmem_desc->named_block_array_addr);
+#endif
+ if (!cvmx_bootmem_desc->named_block_array_addr)
+ {
+ cvmx_dprintf("FATAL ERROR: unable to allocate memory for bootmem descriptor!\n");
+ return(0);
+ }
+ /* Zero the named block array so all slots read as unused (size == 0) */
+ memset((void *)(unsigned long)cvmx_bootmem_desc->named_block_array_addr, 0x0, CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t));
+
+ return(1);
+}
+
+
+/* Acquire the global bootmem spinlock (for callers using the
+** CVMX_BOOTMEM_FLAG_NO_LOCKING variants). */
+void cvmx_bootmem_lock(void)
+{
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+}
+
+/* Release the global bootmem spinlock. */
+void cvmx_bootmem_unlock(void)
+{
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+}
+
+/* Internal accessor: return the raw pointer to the bootmem descriptor. */
+void *__cvmx_bootmem_internal_get_desc_ptr(void)
+{
+ return(cvmx_bootmem_desc);
+}
diff --git a/cvmx-bootmem.h b/cvmx-bootmem.h
new file mode 100644
index 000000000000..b44ea02752bb
--- /dev/null
+++ b/cvmx-bootmem.h
@@ -0,0 +1,429 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+/**
+ * @file
+ * Simple allocate only memory allocator. Used to allocate memory at application
+ * start time.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_BOOTMEM_NAME_LEN 128 /* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64 /* Can change without breaking ABI */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull) /* minimum alignment of bootmem alloced blocks */
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0) /* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1) /* Don't do any locking. */
+
+
+/* First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list. Since the bootloader is
+ * only 32 bits, there is a union providing 64 and 32 bit versions. The
+ * application init code converts addresses to 64 bit addresses before the
+ * application starts.
+ */
+typedef struct
+{
+ /* Note: these are referenced from assembly routines in the bootloader, so this structure
+ ** should not be changed without changing those routines as well. */
+ uint64_t next_block_addr; /**< Physical address of next free block; 0 terminates the list */
+ uint64_t size; /**< Size of this free block in bytes */
+
+} cvmx_bootmem_block_header_t;
+
+
+/* Structure for named memory blocks
+** Number of descriptors
+** available can be changed without affecting compatibility,
+** but name length changes require a bump in the bootmem
+** descriptor version
+** Note: This structure must be naturally 64 bit aligned, as a single
+** memory image will be used by both 32 and 64 bit programs.
+*/
+typedef struct
+{
+ uint64_t base_addr; /**< Base address of named block */
+ uint64_t size; /**< Size actually allocated for named block (may differ from requested); 0 marks the descriptor slot as unused */
+ char name[CVMX_BOOTMEM_NAME_LEN]; /**< name of named block */
+} cvmx_bootmem_named_block_desc_t;
+
+
+
+/* Current descriptor versions */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3 /* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0 /* CVMX bootmem descriptor minor version */
+
+/* First three members of cvmx_bootmem_desc_t are left in original
+** positions for backwards compatibility.
+*/
+typedef struct
+{
+ uint32_t lock; /**< spinlock to control access to list */
+ uint32_t flags; /**< flags for indicating various conditions */
+ uint64_t head_addr; /**< physical address of first free block; 0 if the list is empty */
+
+ uint32_t major_version; /**< incremented changed when incompatible changes made */
+ uint32_t minor_version; /**< incremented changed when compatible changes made, reset to zero when major incremented */
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ uint32_t named_block_num_blocks; /**< number of elements in named blocks array */
+ uint32_t named_block_name_len; /**< length of name array in bootmem blocks */
+ uint64_t named_block_array_addr; /**< address of named memory block descriptors */
+
+} cvmx_bootmem_desc_t;
+
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @param mem_desc_ptr Address of the free memory list
+ * @return
+ */
+extern int cvmx_bootmem_init(void *mem_desc_ptr);
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment);
+
+
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr defines the minimum address of the range
+ * @param max_addr defines the maximum address of the range
+ * @param alignment Alignment required - must be power of 2
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr);
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param alignment Alignment required - must be power of 2
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name);
+
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @param size Size in bytes of block to allocate
+ * @param address Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, char *name);
+
+
+
+/**
+ * Allocate a block of memory from a specific range of the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ * If request cannot be satisfied within the address range specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name);
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @param name name of block to free
+ *
+ * @return 0 on failure,
+ * !0 on success
+ */
+extern int cvmx_bootmem_free_named(char *name);
+
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @param name name of block to find
+ *
+ * @return pointer to named block descriptor on success
+ * 0 on failure
+ */
+cvmx_bootmem_named_block_desc_t * cvmx_bootmem_find_named_block(char *name);
+
+
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the block size requirement
+ */
+uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size);
+
+
+
+/**
+ * Prints out the list of named blocks that have been allocated
+ * along with their addresses and sizes.
+ * This is primarily used for debugging purposes
+ */
+void cvmx_bootmem_print_named(void);
+
+
+/**
+ * Allocates a block of physical memory from the free list, at (optional) requested address and alignment.
+ *
+ * @param req_size size of region to allocate. All requests are rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ * @param address_min
+ * Minimum address that block can occupy.
+ * @param address_max
+ * Specifies the maximum address (inclusive) that the allocation can use.
+ * @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
+ * This must be a power of 2.
+ * (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
+ * less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags);
+
+
+
+/**
+ * Allocates a named block of physical memory from the free list, at (optional) requested address and alignment.
+ *
+ * @param size size of region to allocate. All requests are rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ * @param min_addr
+ * Minimum address that block can occupy.
+ * @param max_addr
+ * Specifies the maximum address (inclusive) that the allocation can use.
+ * @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
+ * This must be a power of 2.
+ * (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
+ * less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name, uint32_t flags);
+
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @param name Name of memory block to find.
+ * If NULL pointer given, then finds unused descriptor, if available.
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return Pointer to memory block descriptor, NULL if not found.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
+ */
+cvmx_bootmem_named_block_desc_t * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
+
+
+/**
+ * Returns the size of available memory in bytes, only
+ * counting blocks that are at least as big as the minimum block
+ * size.
+ *
+ * @param min_block_size
+ * Minimum block size to count in total.
+ *
+ * @return Number of bytes available for allocation that meet the block size requirement
+ */
+uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size);
+
+/**
+ * Frees a named block.
+ *
+ * @param name name of block to free
+ * @param flags flags for passing options
+ *
+ * @return 0 on failure
+ * 1 on success
+ */
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
+
+/**
+ * Frees a block to the bootmem allocator list. This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT: This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ *
+ * @param phy_addr physical address of block
+ * @param size size of block in bytes.
+ * @param flags flags for passing options
+ *
+ * @return 1 on success,
+ * 0 on failure
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
+
+
+/**
+ * Prints the list of currently allocated named blocks
+ *
+ */
+void cvmx_bootmem_phy_named_block_print(void);
+
+
+/**
+ * Prints the list of available memory.
+ *
+ */
+void cvmx_bootmem_phy_list_print(void);
+
+
+
+/**
+ * This function initializes the free memory list used by cvmx_bootmem.
+ * This must be called before any allocations can be done.
+ *
+ * @param mem_size Total memory available, in bytes
+ * @param low_reserved_bytes
+ * Number of bytes to reserve (leave out of free list) at address 0x0.
+ * @param desc_buffer
+ * Buffer for the bootmem descriptor. This must be a 32 bit addressable
+ * address.
+ *
+ * @return 1 on success
+ * 0 on failure
+ */
+int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer);
+
+/**
+ * Locks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+/**
+ * Internal use function to get the current descriptor pointer */
+void *__cvmx_bootmem_internal_get_desc_ptr(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_BOOTMEM_H__ */
diff --git a/cvmx-ciu.h b/cvmx-ciu.h
new file mode 100644
index 000000000000..aca6c4fa9686
--- /dev/null
+++ b/cvmx-ciu.h
@@ -0,0 +1,65 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the hardware Interrupt Unit.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ */
+
+#ifndef __CVMX_CIU_H__
+#define __CVMX_CIU_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/cvmx-cmd-queue.c b/cvmx-cmd-queue.c
new file mode 100644
index 000000000000..5da42e845bba
--- /dev/null
+++ b/cvmx-cmd-queue.c
@@ -0,0 +1,309 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * <hr>$Revision: 42150 $<hr>
+ */
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-fpa.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-bootmem.h"
+
+/**
+ * This application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr = NULL;
+
+
+/**
+ * @INTERNAL
+ * Initialize the Global queue state pointer.
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
+{
+ char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ extern uint64_t octeon_reserve32_memory;
+#endif
+
+ if (cvmx_likely(__cvmx_cmd_queue_state_ptr))
+ return CVMX_CMD_QUEUE_SUCCESS;
+
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#if CONFIG_CAVIUM_RESERVE32
+ if (octeon_reserve32_memory)
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+ octeon_reserve32_memory,
+ octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1,
+ 128, alloc_name);
+ else
+#endif
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
+#else
+ __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
+#endif
+ if (__cvmx_cmd_queue_state_ptr)
+ memset(__cvmx_cmd_queue_state_ptr, 0, sizeof(*__cvmx_cmd_queue_state_ptr));
+ else
+ {
+ cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc)
+ __cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
+ else
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", alloc_name);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ }
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes)
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size)
+{
+ __cvmx_cmd_queue_state_t *qstate;
+ cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return result;
+
+ qstate = __cvmx_cmd_queue_get_state(queue_id);
+ if (qstate == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* We artificially limit max_depth to 1<<20 words. It is an arbitrary limit */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH)
+ {
+ if ((max_depth < 0) || (max_depth > 1<<20))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ else if (max_depth != 0)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ if ((fpa_pool < 0) || (fpa_pool > 7))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if ((pool_size < 128) || (pool_size > 65536))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* See if someone else has already initialized the queue */
+ if (qstate->base_ptr_div128)
+ {
+ if (max_depth != (int)qstate->max_depth)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initalized with different max_depth (%d).\n", (int)qstate->max_depth);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if (fpa_pool != qstate->fpa_pool)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initalized with different FPA pool (%u).\n", qstate->fpa_pool);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if ((pool_size>>3)-1 != qstate->pool_size_m1)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initalized with different FPA pool size (%u).\n", (qstate->pool_size_m1+1)<<3);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_ALREADY_SETUP;
+ }
+ else
+ {
+ cvmx_fpa_ctl_status_t status;
+ void *buffer;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (!status.s.enb)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: FPA is not enabled.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ buffer = cvmx_fpa_alloc(fpa_pool);
+ if (buffer == NULL)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to allocate initial buffer.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->max_depth = max_depth;
+ qstate->fpa_pool = fpa_pool;
+ qstate->pool_size_m1 = (pool_size>>3)-1;
+ qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+ /* We zeroed the now serving field so we need to also zero the ticket */
+ __cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_SUCCESS;
+ }
+}
+
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @param queue_id Queue to shutdown
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr == NULL)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to get queue information.\n");
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ if (cvmx_cmd_queue_length(queue_id) > 0)
+ {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still has data in it.\n");
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+ if (qptr->base_ptr_div128)
+ {
+ cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7), qptr->fpa_pool, 0);
+ qptr->base_ptr_div128 = 0;
+ }
+ __cvmx_cmd_queue_unlock(qptr);
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @param queue_id Hardware command queue to query
+ *
+ * @return Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* The cast is here so gcc will check that all values in the
+ cvmx_cmd_queue_id_t enumeration are here */
+ switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000))
+ {
+ case CVMX_CMD_QUEUE_PKO_BASE:
+ /* FIXME: Need atomic lock on CVMX_PKO_REG_READ_IDX. Right now we
+ are normally called with the queue lock, so that is a SLIGHT
+ amount of protection */
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ {
+ cvmx_pko_mem_debug9_t debug9;
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ return debug9.cn38xx.doorbell;
+ }
+ else
+ {
+ cvmx_pko_mem_debug8_t debug8;
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ return debug8.cn58xx.doorbell;
+ }
+ case CVMX_CMD_QUEUE_ZIP:
+ case CVMX_CMD_QUEUE_DFA:
+ case CVMX_CMD_QUEUE_RAID:
+ // FIXME: Implement other lengths
+ return 0;
+ case CVMX_CMD_QUEUE_DMA_BASE:
+ {
+ cvmx_npei_dmax_counts_t dmax_counts;
+ dmax_counts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+ case CVMX_CMD_QUEUE_END:
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routine access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr && qptr->base_ptr_div128)
+ return cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
+ else
+ return NULL;
+}
+
diff --git a/cvmx-cmd-queue.h b/cvmx-cmd-queue.h
new file mode 100644
index 000000000000..3a928d136629
--- /dev/null
+++ b/cvmx-cmd-queue.h
@@ -0,0 +1,604 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * The common command queue infrastructure abstracts out the
+ * software necessary for adding to Octeon's chained queue
+ * structures. These structures are used for commands to the
+ * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
+ * hardware unit takes commands and CSRs of different types,
+ * they all use basic linked command buffers to store the
+ * pending request. In general, users of the CVMX API don't
+ * call cvmx-cmd-queue functions directly. Instead the hardware
+ * unit specific wrapper should be used. The wrappers perform
+ * unit specific validation and CSR writes to submit the
+ * commands.
+ *
+ * Even though most software will never directly interact with
+ * cvmx-cmd-queue, knowledge of its internal working can help
+ * in diagnosing performance problems and help with debugging.
+ *
+ * Command queue pointers are stored in a global named block
+ * called "cvmx_cmd_queues". Except for the PKO queues, each
+ * hardware queue is stored in its own cache line to reduce SMP
+ * contention on spin locks. The PKO queues are stored such that
+ * every 16th queue is next to each other in memory. This scheme
+ * allows for queues being in separate cache lines when there
+ * are low number of queues per port. With 16 queues per port,
+ * the first queue for each port is in the same cache area. The
+ * second queues for each port are in another area, etc. This
+ * allows software to implement very efficient lockless PKO with
+ * 16 queues per port using a minimum of cache lines per core.
+ * All queues for a given core will be isolated in the same
+ * cache area.
+ *
+ * In addition to the memory pointer layout, cvmx-cmd-queue
+ * provides an optimized fair ll/sc locking mechanism for the
+ * queues. The lock uses a "ticket / now serving" model to
+ * maintain fair order on contended locks. In addition, it uses
+ * predicted locking time to limit cache contention. When a core
+ * know it must wait in line for a lock, it spins on the
+ * internal cycle counter to completely eliminate any causes of
+ * bus traffic.
+ *
+ * <hr> $Revision: 42150 $ <hr>
+ */
+
+#ifndef __CVMX_CMD_QUEUE_H__
+#define __CVMX_CMD_QUEUE_H__
+
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx-fpa.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * By default we disable the max depth support. Most programs
+ * don't use it and it slows down the command queue processing
+ * significantly.
+ */
+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
+#endif
+
+/**
+ * Enumeration representing all hardware blocks that use command
+ * queues. Each hardware block has up to 65536 sub identifiers for
+ * multiple command queues. Not all chips support all hardware
+ * units.
+ */
+typedef enum
+{
+ CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
+#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
+ CVMX_CMD_QUEUE_ZIP = 0x10000,
+ CVMX_CMD_QUEUE_DFA = 0x20000,
+ CVMX_CMD_QUEUE_RAID = 0x30000,
+ CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
+#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
+ CVMX_CMD_QUEUE_END = 0x50000,
+} cvmx_cmd_queue_id_t;
+
+/**
+ * Command write operations can fail if the command queue needs
+ * a new buffer and the associated FPA pool is empty. It can also
+ * fail if the number of queued command words reaches the maximum
+ * set at initialization.
+ */
+typedef enum
+{
+ CVMX_CMD_QUEUE_SUCCESS = 0,
+ CVMX_CMD_QUEUE_NO_MEMORY = -1,
+ CVMX_CMD_QUEUE_FULL = -2,
+ CVMX_CMD_QUEUE_INVALID_PARAM = -3,
+ CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
+} cvmx_cmd_queue_result_t;
+
+typedef struct
+{
+ uint8_t now_serving; /**< You have lock when this is your ticket */
+ uint64_t unused1 : 24;
+ uint32_t max_depth; /**< Maximum outstanding command words */
+ uint64_t fpa_pool : 3; /**< FPA pool buffers come from */
+ uint64_t base_ptr_div128: 29; /**< Top of command buffer pointer shifted 7 */
+ uint64_t unused2 : 6;
+ uint64_t pool_size_m1 : 13; /**< FPA buffer size in 64bit words minus 1 */
+ uint64_t index : 13; /**< Number of comamnds already used in buffer */
+} __cvmx_cmd_queue_state_t;
+
+/**
+ * This structure contains the global state of all command queues.
+ * It is stored in a bootmem named block and shared by all
+ * applications running on Octeon. Tickets are stored in a different
+ * cache line than queue information to reduce the contention on the
+ * ll/sc used to get a ticket. If this is not the case, the update
+ * of queue state causes the ll/sc to fail quite often.
+ */
+typedef struct
+{
+ uint64_t ticket[(CVMX_CMD_QUEUE_END>>16) * 256];
+ __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];
+} __cvmx_cmd_queue_all_state_t;
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes)
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @param queue_id Queue to shutdown
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @param queue_id Hardware command queue to query
+ *
+ * @return Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routine access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * @INTERNAL
+ * Get the index into the state arrays for the supplied queue id.
+ *
+ * @param queue_id Queue ID to get an index for
+ *
+ * @return Index into the state arrays
+ */
+static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
+{
+ /* Warning: This code currently only works with devices that have 256 queues
+ or less. Devices with more than 16 queues are laid out in memory to allow
+ cores quick access to every 16th queue. This reduces cache thrashing
+ when you are running 16 queues per port to support lockless operation */
+ int unit = queue_id>>16;
+ int q = (queue_id >> 4) & 0xf;
+ int core = queue_id & 0xf;
+ return unit*256 + core*16 + q;
+}
+
+
+/**
+ * @INTERNAL
+ * Lock the supplied queue so nobody else is updating it at the same
+ * time as us.
+ *
+ * @param queue_id Queue ID to lock
+ * @param qptr Pointer to the queue's global state
+ */
+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
+{
+ extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+ int tmp;
+ int my_ticket;
+ CVMX_PREFETCH(qptr, 0);
+ asm volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %[my_ticket], %[ticket_ptr]\n" /* Atomic add one to ticket_ptr */
+ "li %[ticket], 1\n" /* and store the original value */
+ "baddu %[ticket], %[my_ticket]\n" /* in my_ticket */
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
+ " nop\n"
+ "lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
+ "2:\n"
+ "beq %[ticket], %[my_ticket], 4f\n" /* Jump out if now_serving == my_ticket */
+ " subu %[ticket], %[my_ticket], %[ticket]\n" /* Find out how many tickets are in front of me */
+ "subu %[ticket], 1\n" /* Use tickets in front of me minus one to delay */
+ "cins %[ticket], %[ticket], 5, 7\n" /* Delay will be ((tickets in front)-1)*32 loops */
+ "3:\n"
+ "bnez %[ticket], 3b\n" /* Loop here until our ticket might be up */
+ " subu %[ticket], 1\n"
+ "b 2b\n" /* Jump back up to check out ticket again */
+ " lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
+ "4:\n"
+ ".set pop\n"
+ : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [now_serving] "=m" (qptr->now_serving),
+ [ticket] "=r" (tmp),
+ [my_ticket] "=r" (my_ticket)
+ );
+}
+
+
/**
 * @INTERNAL
 * Unlock the queue, flushing all writes.
 *
 * Releases the ticket lock taken by __cvmx_cmd_queue_lock() by advancing
 * now_serving to the next ticket, then issues CVMX_SYNCWS so the queue
 * updates are flushed before the next lock holder proceeds.
 *
 * @param qptr Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
    qptr->now_serving++;
    CVMX_SYNCWS;
}
+
+
+/**
+ * @INTERNAL
+ * Get the queue state structure for the given queue id
+ *
+ * @param queue_id Queue id to get
+ *
+ * @return Queue structure or NULL on failure
+ */
+static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
+{
+ extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+ if (CVMX_ENABLE_PARAMETER_CHECKING)
+ {
+ if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END))
+ return NULL;
+ if (cvmx_unlikely((queue_id & 0xffff) >= 256))
+ return NULL;
+ }
+ return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
+}
+
+
/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd_count Number of command words to write (valid range 1-32)
 * @param cmds     Array of commands to write
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely(cmds == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
    {
        /* base_ptr_div128 stores the buffer's physical address divided by
           128, so shift left 7 to recover it. index counts 64-bit words
           already used in the current buffer. */
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += cmd_count;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    else
    {
        uint64_t *ptr;
        int count;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        count = qptr->pool_size_m1 - qptr->index;
        ptr += qptr->index;
        cmd_count-=count;
        while (count--)
            *ptr++ = *cmds++;
        /* The final word of the old buffer is a link: the physical address
           of the freshly allocated buffer */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = cmd_count;
        ptr = new_buffer;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
+
+
/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     First command word
 * @param cmd2     Second command word
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 2;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        /* cmd1 always fits; cmd2 only if a slot remains before the link word */
        *ptr++ = cmd1;
        if (cvmx_likely(count))
            *ptr++ = cmd2;
        /* Last word of the old buffer links to the new buffer's physical address */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        if (cvmx_unlikely(count == 0))
        {
            /* cmd2 did not fit in the old buffer; it becomes the first word
               of the new one */
            qptr->index = 1;
            new_buffer[0] = cmd2;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
+
+
/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     First command word
 * @param cmd2     Second command word
 * @param cmd3     Third command word
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 3;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
        ptr[2] = cmd3;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        /* cmd1 always fits; cmd2/cmd3 only while slots remain before the link */
        *ptr++ = cmd1;
        if (count)
        {
            *ptr++ = cmd2;
            if (count > 1)
                *ptr++ = cmd3;
        }
        /* Last word of the old buffer links to the new buffer's physical address */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        ptr = new_buffer;
        /* Spill whichever of cmd2/cmd3 did not fit into the new buffer */
        if (count == 0)
        {
            *ptr++ = cmd2;
            qptr->index++;
        }
        if (count < 2)
        {
            *ptr++ = cmd3;
            qptr->index++;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/cvmx-cn3010-evb-hs5.c b/cvmx-cn3010-evb-hs5.c
new file mode 100644
index 000000000000..3a3c43e241e4
--- /dev/null
+++ b/cvmx-cn3010-evb-hs5.c
@@ -0,0 +1,214 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Interface to the EBH-30xx specific devices
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#include <time.h>
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-cn3010-evb-hs5.h"
+#include "cvmx-twsi.h"
+
+
/* Convert a binary value (0-99) to packed BCD: tens digit in the high
   nibble, ones digit in the low nibble. */
static inline uint8_t bin2bcd(uint8_t bin)
{
    uint8_t tens = bin / 10;
    uint8_t ones = bin % 10;
    return (uint8_t)((tens << 4) | ones);
}
+
/* Convert a packed BCD value to binary: high nibble holds tens, low
   nibble holds ones. */
static inline uint8_t bcd2bin(uint8_t bcd)
{
    uint8_t tens = bcd >> 4;
    return (uint8_t)(tens * 10 + (bcd & 0xf));
}
+
/*
 * Validate one field of a struct tm: if _expr is true the field is out of
 * range, so warn (naming the field) and flag failure by setting the
 * caller's local 'rc' to -1.
 *
 * The do { ... } while (0) wrapper must NOT end with a semicolon: the
 * original trailing ';' made each use expand to two statements, which
 * breaks the macro inside an unbraced if/else. Callers supply the
 * semicolon at the point of use.
 */
#define TM_CHECK(_expr, _msg)                                          \
    do {                                                               \
        if (_expr) {                                                   \
            cvmx_dprintf("Warning: RTC has invalid %s field\n", (_msg)); \
            rc = -1;                                                   \
        }                                                              \
    } while (0)
+
/**
 * @INTERNAL
 * Sanity-check the fields of a broken-down calendar time.
 *
 * Every out-of-range field prints a warning (via TM_CHECK) and marks the
 * structure invalid; all fields are checked so every problem is reported
 * in a single pass.
 *
 * @param tms  Time structure to validate (NULL is rejected)
 *
 * @return 0 if all fields are in range, -1 on NULL input or any invalid
 *         field
 */
static int validate_tm_struct(struct tm * tms)
{
    int rc = 0;

    if (!tms)
        return -1;

    TM_CHECK(tms->tm_sec < 0 || tms->tm_sec > 60, "second"); /* 60 permits a leap second */
    TM_CHECK(tms->tm_min < 0 || tms->tm_min > 59, "minute");
    TM_CHECK(tms->tm_hour < 0 || tms->tm_hour > 23, "hour");
    TM_CHECK(tms->tm_mday < 1 || tms->tm_mday > 31, "day");
    TM_CHECK(tms->tm_wday < 0 || tms->tm_wday > 6, "day of week");
    TM_CHECK(tms->tm_mon < 0 || tms->tm_mon > 11, "month");
    TM_CHECK(tms->tm_year < 0 || tms->tm_year > 200,"year"); /* tm_year is years since 1900 */

    return rc;
}
+
/*
 * Board-specific RTC read.
 * Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
 * and converted internally from the DS1337's BCD calendar registers.
 * Returns the converted time; on an unconfigured RTC a warning is
 * printed and whatever mktime() makes of the raw fields is returned.
 * NOTE(review): mktime() interprets struct tm in local time - on
 * bare-metal builds this is presumably UTC; confirm for Linux userspace.
 */
uint32_t cvmx_rtc_ds1337_read(void)
{
    int i, retry;
    uint32_t time;
    uint8_t reg[8];
    uint8_t sec;
    struct tm tms;


    memset(&reg, 0, sizeof(reg));
    memset(&tms, 0, sizeof(struct tm));

    for(retry=0; retry<2; retry++)
    {
        /* Lockless read: re-read the seconds register afterwards; if it
           changed, the time rolled over mid-read, so retry once */
        reg[0] = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        for(i=1; i<7; i++)
            reg[i] = cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR);

        sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        if ((sec & 0xf) == (reg[0] & 0xf))
            break; /* Time did not roll-over, value is correct */
    }

    /* DS1337 registers 0-6 are BCD: sec, min, hour, day-of-week,
       day-of-month, month (bit 7 = century), year */
    tms.tm_sec = bcd2bin(reg[0] & 0x7f);
    tms.tm_min = bcd2bin(reg[1] & 0x7f);
    tms.tm_hour = bcd2bin(reg[2] & 0x3f);
    if ((reg[2] & 0x40) && (reg[2] & 0x20)) /* AM/PM format and is PM time */
    {
        tms.tm_hour = (tms.tm_hour + 12) % 24;
    }
    tms.tm_wday = (reg[3] & 0x7) - 1; /* Day of week field is 0..6 */
    tms.tm_mday = bcd2bin(reg[4] & 0x3f);
    tms.tm_mon = bcd2bin(reg[5] & 0x1f) - 1; /* Month field is 0..11 */
    tms.tm_year = ((reg[5] & 0x80) ? 100 : 0) + bcd2bin(reg[6]);


    if (validate_tm_struct(&tms))
        cvmx_dprintf("Warning: RTC calendar is not configured properly\n");

    time = mktime(&tms);

    return time;
}
+
/*
 * Board-specific RTC write.
 * Time is given in seconds from epoch (Jan 1 1970 at 00:00:00 UTC),
 * converted to calendar form with localtime_r(), then written to the
 * DS1337's BCD registers.
 *
 * Returns 0 on success, -1 on invalid calendar values or TWSI write
 * failure.
 */
int cvmx_rtc_ds1337_write(uint32_t time)
{
    int i, rc, retry;
    struct tm tms;
    uint8_t reg[8];
    uint8_t sec;
    time_t time_from_epoch = time;


    localtime_r(&time_from_epoch, &tms);

    if (validate_tm_struct(&tms))
    {
        cvmx_dprintf("Error: RTC was passed wrong calendar values, write failed\n");
        goto tm_invalid;
    }

    /* Encode the calendar fields into the DS1337's BCD register layout */
    reg[0] = bin2bcd(tms.tm_sec);
    reg[1] = bin2bcd(tms.tm_min);
    reg[2] = bin2bcd(tms.tm_hour); /* Force 0..23 format even if using AM/PM */
    reg[3] = bin2bcd(tms.tm_wday + 1); /* Device day-of-week is 1..7 */
    reg[4] = bin2bcd(tms.tm_mday);
    reg[5] = bin2bcd(tms.tm_mon + 1); /* Device month is 1..12 */
    if (tms.tm_year >= 100) /* Set century bit */
    {
        reg[5] |= 0x80;
    }
    reg[6] = bin2bcd(tms.tm_year % 100);

    /* Lockless write: if the seconds register rolled over while writing,
       the registers may be inconsistent, so write them again */
    for(retry=0; retry<2; retry++)
    {
        rc = 0;
        for(i=0; i<7; i++)
        {
            rc |= cvmx_twsi_write8(CVMX_RTC_DS1337_ADDR, i, reg[i]);
        }

        sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        if ((sec & 0xf) == (reg[0] & 0xf))
            break; /* Time did not roll-over, value is correct */
    }

    return (rc ? -1 : 0);

  tm_invalid:
    return -1;
}
+
+#ifdef CVMX_RTC_DEBUG
+
/* Debug helper: dump the first 16 DS1337 registers (time, alarms,
   control/status) to the console in hex. */
void cvmx_rtc_ds1337_dump_state(void)
{
    int i = 0;

    printf("RTC:\n");
    printf("%d : %02X ", i, cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0));
    for(i=1; i<16; i++) {
        printf("%02X ", cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR));
    }
    printf("\n");
}
+
+#endif /* CVMX_RTC_DEBUG */
diff --git a/cvmx-cn3010-evb-hs5.h b/cvmx-cn3010-evb-hs5.h
new file mode 100644
index 000000000000..af65496a7b5c
--- /dev/null
+++ b/cvmx-cn3010-evb-hs5.h
@@ -0,0 +1,69 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+#ifndef __CVMX_CN3010_EVB_HS5_H__
+#define __CVMX_CN3010_EVB_HS5_H__
+
+/**
+ * @file
+ *
+ * Interface to the EBH-30xx specific devices
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CVMX_RTC_DS1337_ADDR (0x68)
+
+uint32_t cvmx_rtc_ds1337_read(void);
+int cvmx_rtc_ds1337_write(uint32_t time);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CN3010_EVB_HS5_H__ */
diff --git a/cvmx-compactflash.c b/cvmx-compactflash.c
new file mode 100644
index 000000000000..eca620a6c06e
--- /dev/null
+++ b/cvmx-compactflash.c
@@ -0,0 +1,431 @@
+/***********************license start***************
+ * Copyright (c) 2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+#include "cvmx.h"
+#include "cvmx-sysinfo.h"
+#include "cvmx-compactflash.h"
+
+
#ifndef MAX
/* Larger of two values. NOTE: evaluates each argument twice - avoid
   arguments with side effects. */
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif
/* Integer division of _Dividend by _Divisor, rounding up. */
#define FLASH_RoundUP(_Dividend, _Divisor) (((_Dividend)+(_Divisor-1))/(_Divisor))
+/**
+ * Convert nanosecond based time to setting used in the
+ * boot bus timing register, based on timing multiple
+ *
+ *
+ */
+static uint32_t ns_to_tim_reg(int tim_mult, uint32_t nsecs)
+{
+ uint32_t val;
+
+ /* Compute # of eclock periods to get desired duration in nanoseconds */
+ val = FLASH_RoundUP(nsecs * (cvmx_sysinfo_get()->cpu_clock_hz/1000000), 1000);
+
+ /* Factor in timing multiple, if not 1 */
+ if (tim_mult != 1)
+ val = FLASH_RoundUP(val, tim_mult);
+
+ return (val);
+}
+
/**
 * Compute the boot bus DMA timing register value for a compact flash
 * card from its ATA 'identify' response. Words 53/63 select the basic
 * MWDMA mode and word 163 the advanced CF modes; the fastest supported
 * mode is used.
 *
 * @param tim_mult   Eclock timing multiple (1, 2, 4 or 8)
 * @param ident_data Data returned by the 'identify' command
 * @param mwdma_mode_ptr
 *                   Optional pointer to return the selected MWDMA mode in
 *
 * @return 64-bit value for the DMA timing register, or 0 if the card does
 *         not support DMA.
 *         NOTE(review): the unsupported-mode branch below returns (-1)
 *         cast to uint64_t rather than 0 - callers should treat both as
 *         failure; confirm the intended contract.
 */
uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr)
{

    cvmx_mio_boot_dma_timx_t dma_tim;
    int oe_a;
    int oe_n;
    int dma_acks;
    int dma_ackh;
    int dma_arq;
    int pause;
    int To,Tkr,Td; /* MWDMA timing parameters (ns) named per the CF spec */
    int mwdma_mode = -1;
    uint16_t word53_field_valid;
    uint16_t word63_mwdma;
    uint16_t word163_adv_timing_info;

    if (!ident_data)
        return 0;

    word53_field_valid = ident_data[53];
    word63_mwdma = ident_data[63];
    word163_adv_timing_info = ident_data[163];

    dma_tim.u64 = 0;

    /* Check for basic MWDMA modes (word 63 is valid only if word 53 bit 1
       is set) */
    if (word53_field_valid & 0x2)
    {
        if (word63_mwdma & 0x4)
            mwdma_mode = 2;
        else if (word63_mwdma & 0x2)
            mwdma_mode = 1;
        else if (word63_mwdma & 0x1)
            mwdma_mode = 0;
    }

    /* Check for advanced MWDMA modes */
    switch ((word163_adv_timing_info >> 3) & 0x7)
    {
        case 1:
            mwdma_mode = 3;
            break;
        case 2:
            mwdma_mode = 4;
            break;
        default:
            break;

    }
    /* DMA is not supported by this card */
    if (mwdma_mode < 0)
        return 0;

    /* Now set up the DMA timing: encode the multiple into the register's
       2-bit field (4 encodes as 0) */
    switch (tim_mult)
    {
        case 1:
            dma_tim.s.tim_mult = 1;
            break;
        case 2:
            dma_tim.s.tim_mult = 2;
            break;
        case 4:
            dma_tim.s.tim_mult = 0;
            break;
        case 8:
            dma_tim.s.tim_mult = 3;
            break;
        default:
            cvmx_dprintf("ERROR: invalid boot bus dma tim_mult setting\n");
            break;
    }


    switch (mwdma_mode)
    {
        case 4:
            To = 80;    /* cycle time */
            Td = 55;    /* strobe (DIOR-/DIOW-) assertion */
            Tkr = 20;   /* read recovery */

            oe_a = Td + 20; // Td (seems to need extra margin here)
            oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To

            // oe_n + oe_h must be >= To (cycle time)
            dma_acks = 0; //Ti
            dma_ackh = 5; // Tj

            dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
            pause = 25 - dma_arq * 1000/(cvmx_sysinfo_get()->cpu_clock_hz/1000000); // Tz
            break;
        case 3:
            To = 100;
            Td = 65;
            Tkr = 20;

            oe_a = Td + 20; // Td (seems to need extra margin here)
            oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To

            // oe_n + oe_h must be >= To (cycle time)
            dma_acks = 0; //Ti
            dma_ackh = 5; // Tj

            dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
            pause = 25 - dma_arq * 1000/(cvmx_sysinfo_get()->cpu_clock_hz/1000000); // Tz
            break;
        case 2:
            /* Empirically determined fudge factors (see notes below):
               values without the extra margins failed to boot */
            To = 120;
            Td = 70;
            Tkr = 25;

            // oe_a 0 fudge doesn't work; 10 seems to
            oe_a = Td + 20 + 10; // Td (seems to need extra margin here)
            oe_n = MAX(To - oe_a, Tkr) + 10; // Tkr from cf spec, lengthened to meet To
            // oe_n 0 fudge fails;;; 10 boots

            // 20 ns fudge needed on dma_acks
            // oe_n + oe_h must be >= To (cycle time)
            dma_acks = 0 + 20; //Ti
            dma_ackh = 5; // Tj

            dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
            pause = 25 - dma_arq * 1000/(cvmx_sysinfo_get()->cpu_clock_hz/1000000); // Tz
            // no fudge needed on pause

            break;
        case 1:
        case 0:
        default:
            cvmx_dprintf("ERROR: Unsupported DMA mode: %d\n", mwdma_mode);
            return(-1);
            break;
    }

    if (mwdma_mode_ptr)
        *mwdma_mode_ptr = mwdma_mode;

    dma_tim.s.dmack_pi = 1;

    dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
    dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

    dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, dma_acks);
    dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

    dma_tim.s.dmarq = dma_arq;
    dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

    dma_tim.s.rd_dly = 0; /* Sample right on edge */

    /* writes only */
    dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
    dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

#if 0
    cvmx_dprintf("ns to ticks (mult %d) of %d is: %d\n", TIM_MULT, 60, ns_to_tim_reg(60));
    cvmx_dprintf("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
            dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
#endif

    return(dma_tim.u64);


}
+
+
/**
 * Setup timing and region config to support a specific IDE PIO
 * mode over the bootbus.
 *
 * @param cs0      Bootbus region number connected to CS0 on the IDE device
 * @param cs1      Bootbus region number connected to CS1 on the IDE device
 * @param pio_mode PIO mode to set (0-6)
 */
void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode)
{
    cvmx_mio_boot_reg_cfgx_t mio_boot_reg_cfg;
    cvmx_mio_boot_reg_timx_t mio_boot_reg_tim;
    int cs;
    int clocks_us; /* Number of clock cycles per microsec */
    int tim_mult;
    int use_iordy; /* Set for PIO0-4, not set for PIO5-6 */
    int t1; /* These t names are timing parameters from the ATA spec, in ns */
    int t2;
    int t2i;
    int t4;
    int t6;
    int t6z;
    int t9;

    /* PIO modes 0-4 all allow the device to deassert IORDY to slow down
       the host */
    use_iordy = 1;

    /* Use the PIO mode to determine timing parameters */
    switch(pio_mode) {
    case 6:
        /* CF spec says IORDY should be ignored in PIO 6 */
        use_iordy = 0;
        t1 = 10;
        t2 = 55;
        t2i = 20;
        t4 = 5;
        t6 = 5;
        t6z = 20;
        t9 = 10;
        break;
    case 5:
        /* CF spec says IORDY should be ignored in PIO 5 */
        use_iordy = 0;
        t1 = 15;
        t2 = 65;
        t2i = 25;
        t4 = 5;
        t6 = 5;
        t6z = 20;
        t9 = 10;
        break;
    case 4:
        t1 = 25;
        t2 = 70;
        t2i = 25;
        t4 = 10;
        t6 = 5;
        t6z = 30;
        t9 = 10;
        break;
    case 3:
        t1 = 30;
        t2 = 80;
        t2i = 70;
        t4 = 10;
        t6 = 5;
        t6z = 30;
        t9 = 10;
        break;
    case 2:
        t1 = 30;
        t2 = 100;
        t2i = 0;
        t4 = 15;
        t6 = 5;
        t6z = 30;
        t9 = 10;
        break;
    case 1:
        t1 = 50;
        t2 = 125;
        t2i = 0;
        t4 = 20;
        t6 = 5;
        t6z = 30;
        t9 = 15;
        break;
    default:
        /* PIO mode 0 timing (also the safe fallback) */
        t1 = 70;
        t2 = 165;
        t2i = 0;
        t4 = 30;
        t6 = 5;
        t6z = 30;
        t9 = 20;
        break;
    }
    /* Convert times in ns to clock cycles, rounding up */
    clocks_us = FLASH_RoundUP((uint64_t)cvmx_sysinfo_get()->cpu_clock_hz, 1000000);

    /* Convert times in clock cycles, rounding up. Octeon parameters are in
       minus one notation, so take off one after the conversion */
    t1 = FLASH_RoundUP(t1 * clocks_us, 1000);
    if (t1)
        t1--;
    t2 = FLASH_RoundUP(t2 * clocks_us, 1000);
    if (t2)
        t2--;
    t2i = FLASH_RoundUP(t2i * clocks_us, 1000);
    if (t2i)
        t2i--;
    t4 = FLASH_RoundUP(t4 * clocks_us, 1000);
    if (t4)
        t4--;
    t6 = FLASH_RoundUP(t6 * clocks_us, 1000);
    if (t6)
        t6--;
    t6z = FLASH_RoundUP(t6z * clocks_us, 1000);
    if (t6z)
        t6z--;
    t9 = FLASH_RoundUP(t9 * clocks_us, 1000);
    if (t9)
        t9--;

    /* Start using a scale factor of one cycle. Keep doubling it until
       the parameters fit in their fields. Since t2 is the largest number,
       we only need to check it */
    tim_mult = 1;
    while (t2 >= 1<<6)
    {
        t1 = FLASH_RoundUP(t1, 2);
        t2 = FLASH_RoundUP(t2, 2);
        t2i = FLASH_RoundUP(t2i, 2);
        t4 = FLASH_RoundUP(t4, 2);
        t6 = FLASH_RoundUP(t6, 2);
        t6z = FLASH_RoundUP(t6z, 2);
        t9 = FLASH_RoundUP(t9, 2);
        tim_mult *= 2;
    }

    /* Program the region config for both chip selects identically */
    cs = cs0;
    do {
        mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
        mio_boot_reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
        switch(tim_mult) {
        case 1:
            mio_boot_reg_cfg.s.tim_mult = 1;
            break;
        case 2:
            mio_boot_reg_cfg.s.tim_mult = 2;
            break;
        case 4:
            mio_boot_reg_cfg.s.tim_mult = 0; /* Encoding: 0 means multiply by 4 */
            break;
        case 8:
        default:
            mio_boot_reg_cfg.s.tim_mult = 3;
            break;
        }
        mio_boot_reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
        mio_boot_reg_cfg.s.sam = 0; /* Don't combine write and output enable */
        mio_boot_reg_cfg.s.we_ext = 0; /* No write enable extension */
        mio_boot_reg_cfg.s.oe_ext = 0; /* No read enable extension */
        mio_boot_reg_cfg.s.en = 1; /* Enable this region */
        mio_boot_reg_cfg.s.orbit = 0; /* Don't combine with previous region */
        mio_boot_reg_cfg.s.width = 1; /* 16 bits wide */
        cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), mio_boot_reg_cfg.u64);
        if(cs == cs0)
            cs = cs1;
        else
            cs = cs0;
    } while(cs != cs0);

    mio_boot_reg_tim.u64 = 0;
    mio_boot_reg_tim.s.pagem = 0; /* Disable page mode */
    mio_boot_reg_tim.s.waitm = use_iordy; /* Enable dynamic timing */
    mio_boot_reg_tim.s.pages = 0; /* Pages are disabled */
    mio_boot_reg_tim.s.ale = 8; /* If someone uses ALE, this seems to work */
    mio_boot_reg_tim.s.page = 0; /* Not used */
    mio_boot_reg_tim.s.wait = 0; /* Time after IORDY to continue to assert the data */
    mio_boot_reg_tim.s.pause = 0; /* Time after CE that signals stay valid */
    mio_boot_reg_tim.s.wr_hld = t9; /* How long to hold after a write */
    mio_boot_reg_tim.s.rd_hld = t9; /* How long to wait after a read for device to tristate */
    mio_boot_reg_tim.s.we = t2; /* How long write enable is asserted */
    mio_boot_reg_tim.s.oe = t2; /* How long read enable is asserted */
    mio_boot_reg_tim.s.ce = t1; /* Time after CE that read/write starts */
    mio_boot_reg_tim.s.adr = 1; /* Time before CE that address is valid */

    /* Program the bootbus region timing for both chip selects */
    cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs0), mio_boot_reg_tim.u64);
    cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs1), mio_boot_reg_tim.u64);
}
diff --git a/cvmx-compactflash.h b/cvmx-compactflash.h
new file mode 100644
index 000000000000..dd6d9cfae68f
--- /dev/null
+++ b/cvmx-compactflash.h
@@ -0,0 +1,76 @@
+/***********************license start***************
+ * Copyright (c) 2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+#ifndef __CVMX_COMPACTFLASH_H__
+#define __CVMX_COMPACTFLASH_H__
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * This function takes values from the compact flash device
+ * identify response, and returns the appropriate value to write
+ * into the boot bus DMA timing register.
+ *
+ * @param tim_mult Eclock timing multiple to use
+ * @param ident_data Data returned by the 'identify' command. This is used to
+ * determine the DMA modes supported by the card, if any.
+ * @param mwdma_mode_ptr
+ * Optional pointer to return MWDMA mode in
+ *
+ * @return 64 bit value to write to DMA timing register
+ */
+extern uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr);
+
+/**
+ * Setup timing and region config to support a specific IDE PIO
+ * mode over the bootbus.
+ *
+ * @param cs0 Bootbus region number connected to CS0 on the IDE device
+ * @param cs1 Bootbus region number connected to CS1 on the IDE device
+ * @param pio_mode PIO mode to set (0-6)
+ */
+extern void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __CVMX_COMPACTFLASH_H__ */
diff --git a/cvmx-core.c b/cvmx-core.c
new file mode 100644
index 000000000000..43d8eee00dc1
--- /dev/null
+++ b/cvmx-core.c
@@ -0,0 +1,147 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on core such as TLB config, etc.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-core.h"
+
+
/**
 * Adds a wired TLB entry, and returns the index of the entry added.
 * Parameters are written to TLB registers without further processing.
 *
 * @param hi        HI register value (virtual address / ASID)
 * @param lo0       lo0 register value (even page: PFN + D/V/G bits)
 * @param lo1       lo1 register value (odd page: PFN + D/V/G bits)
 * @param page_mask pagemask register value
 *
 * @return Success: TLB index used (0-31) or (0-63) for OCTEON Plus
 *         Failure: -1
 */
int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask)
{
    uint32_t index;
    uint32_t index_limit = 31;  /* CN3XXX parts have a 32-entry TLB */

    if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
    {
        /* Later (OCTEON Plus) models have a 64-entry TLB */
        index_limit=63;
    }

    /* The COP0 Wired register holds the count of wired entries, which is
       also the index of the first slot we can claim for a new wired entry. */
    CVMX_MF_TLB_WIRED(index);
    if (index >= index_limit)
    {
        /* NOTE(review): '>=' also rejects index == index_limit, leaving the
           last TLB slot permanently unused - confirm this is intentional. */
        return(-1);
    }
    /* Load the entry into the COP0 staging registers, point Index at the
       free slot, grow the wired region, then commit with TLBWI.  The EHBs
       order the mtc0 writes against the TLB write (hazard barriers). */
    CVMX_MT_ENTRY_HIGH(hi);
    CVMX_MT_ENTRY_LO_0(lo0);
    CVMX_MT_ENTRY_LO_1(lo1);
    CVMX_MT_PAGEMASK(page_mask);
    CVMX_MT_TLB_INDEX(index);
    CVMX_MT_TLB_WIRED(index + 1);
    CVMX_EHB;
    CVMX_TLBWI;
    CVMX_EHB;
    return(index);
}
+
+
+
+/**
+ * Adds a fixed (wired) TLB mapping. Returns TLB index used or -1 on error.
+ * This is a wrapper around cvmx_core_add_wired_tlb_entry()
+ *
+ * @param vaddr Virtual address to map
+ * @param page0_addr page 0 physical address, with low 3 bits representing the DIRTY, VALID, and GLOBAL bits
+ * @param page1_addr page1 physical address, with low 3 bits representing the DIRTY, VALID, and GLOBAL bits
+ * @param page_mask page mask.
+ *
+ * @return Success: TLB index used (0-31)
+ * Failure: -1
+ */
+int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
+{
+
+ if ((vaddr & (page_mask | 0x7ff))
+ || ((page0_addr & ~0x7ULL) & ((page_mask | 0x7ff) >> 1))
+ || ((page1_addr & ~0x7ULL) & ((page_mask | 0x7ff) >> 1)))
+ {
+ cvmx_dprintf("Error adding tlb mapping: invalid address alignment at vaddr: 0x%llx\n", (unsigned long long)vaddr);
+ return(-1);
+ }
+
+
+ return(cvmx_core_add_wired_tlb_entry(vaddr,
+ (page0_addr >> 6) | (page0_addr & 0x7),
+ (page1_addr >> 6) | (page1_addr & 0x7),
+ page_mask));
+
+}
+/**
+ * Adds a fixed (wired) TLB mapping. Returns TLB index used or -1 on error.
+ * Assumes both pages are valid. Use cvmx_core_add_fixed_tlb_mapping_bits for more control.
+ * This is a wrapper around cvmx_core_add_wired_tlb_entry()
+ *
+ * @param vaddr Virtual address to map
+ * @param page0_addr page 0 physical address
+ * @param page1_addr page1 physical address
+ * @param page_mask page mask.
+ *
+ * @return Success: TLB index used (0-31)
+ * Failure: -1
+ */
+int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
+{
+
+ return(cvmx_core_add_fixed_tlb_mapping_bits(vaddr, page0_addr | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, page1_addr | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, page_mask));
+
+}
diff --git a/cvmx-core.h b/cvmx-core.h
new file mode 100644
index 000000000000..e498c572e717
--- /dev/null
+++ b/cvmx-core.h
@@ -0,0 +1,166 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on core such as TLB config, etc.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_CORE_H__
+#define __CVMX_CORE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/**
 * The types of performance counters supported per cpu.
 *
 * NOTE(review): the numeric values appear to be the event-select encodings
 * written to the COP0 performance-counter control register (see
 * cvmx_core_perf_control_t.event below); gaps in the numbering (15, 30-31,
 * 47, 52, ...) are presumably reserved encodings - confirm against the
 * Octeon hardware reference manual.
 */
typedef enum
{
    CVMX_CORE_PERF_NONE    = 0,  /**< Turn off the performance counter */
    CVMX_CORE_PERF_CLK     = 1,  /**< Conditionally clocked cycles (as opposed to count/cvm_count which count even with no clocks) */
    CVMX_CORE_PERF_ISSUE   = 2,  /**< Instructions issued but not retired */
    CVMX_CORE_PERF_RET     = 3,  /**< Instructions retired */
    CVMX_CORE_PERF_NISSUE  = 4,  /**< Cycles no issue */
    CVMX_CORE_PERF_SISSUE  = 5,  /**< Cycles single issue */
    CVMX_CORE_PERF_DISSUE  = 6,  /**< Cycles dual issue */
    CVMX_CORE_PERF_IFI     = 7,  /**< Cycle ifetch issued (but not necessarily commit to pp_mem) */
    CVMX_CORE_PERF_BR      = 8,  /**< Branches retired */
    CVMX_CORE_PERF_BRMIS   = 9,  /**< Branch mispredicts */
    CVMX_CORE_PERF_J       = 10, /**< Jumps retired */
    CVMX_CORE_PERF_JMIS    = 11, /**< Jumps mispredicted */
    CVMX_CORE_PERF_REPLAY  = 12, /**< Mem Replays */
    CVMX_CORE_PERF_IUNA    = 13, /**< Cycles idle due to unaligned_replays */
    CVMX_CORE_PERF_TRAP    = 14, /**< trap_6a signal */
    CVMX_CORE_PERF_UULOAD  = 16, /**< Unexpected unaligned loads (REPUN=1) */
    CVMX_CORE_PERF_UUSTORE = 17, /**< Unexpected unaligned store (REPUN=1) */
    CVMX_CORE_PERF_ULOAD   = 18, /**< Unaligned loads (REPUN=1 or USEUN=1) */
    CVMX_CORE_PERF_USTORE  = 19, /**< Unaligned store (REPUN=1 or USEUN=1) */
    CVMX_CORE_PERF_EC      = 20, /**< Exec clocks(must set CvmCtl[DISCE] for accurate timing) */
    CVMX_CORE_PERF_MC      = 21, /**< Mul clocks(must set CvmCtl[DISCE] for accurate timing) */
    CVMX_CORE_PERF_CC      = 22, /**< Crypto clocks(must set CvmCtl[DISCE] for accurate timing) */
    CVMX_CORE_PERF_CSRC    = 23, /**< Issue_csr clocks(must set CvmCtl[DISCE] for accurate timing) */
    CVMX_CORE_PERF_CFETCH  = 24, /**< Icache committed fetches (demand+prefetch) */
    CVMX_CORE_PERF_CPREF   = 25, /**< Icache committed prefetches */
    CVMX_CORE_PERF_ICA     = 26, /**< Icache aliases */
    CVMX_CORE_PERF_II      = 27, /**< Icache invalidates */
    CVMX_CORE_PERF_IP      = 28, /**< Icache parity error */
    CVMX_CORE_PERF_CIMISS  = 29, /**< Cycles idle due to imiss (must set CvmCtl[DISCE] for accurate timing) */
    CVMX_CORE_PERF_WBUF    = 32, /**< Number of write buffer entries created */
    CVMX_CORE_PERF_WDAT    = 33, /**< Number of write buffer data cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_WBUFLD  = 34, /**< Number of write buffer entries forced out by loads */
    CVMX_CORE_PERF_WBUFFL  = 35, /**< Number of cycles that there was no available write buffer entry (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
    CVMX_CORE_PERF_WBUFTR  = 36, /**< Number of stores that found no available write buffer entries */
    CVMX_CORE_PERF_BADD    = 37, /**< Number of address bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_BADDL2  = 38, /**< Number of address bus cycles not reflected (i.e. destined for L2) (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_BFILL   = 39, /**< Number of fill bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_DDIDS   = 40, /**< Number of Dstream DIDs created */
    CVMX_CORE_PERF_IDIDS   = 41, /**< Number of Istream DIDs created */
    CVMX_CORE_PERF_DIDNA   = 42, /**< Number of cycles that no DIDs were available (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
    CVMX_CORE_PERF_LDS     = 43, /**< Number of load issues */
    CVMX_CORE_PERF_LMLDS   = 44, /**< Number of local memory load */
    CVMX_CORE_PERF_IOLDS   = 45, /**< Number of I/O load issues */
    CVMX_CORE_PERF_DMLDS   = 46, /**< Number of loads that were not prefetches and missed in the cache */
    CVMX_CORE_PERF_STS     = 48, /**< Number of store issues */
    CVMX_CORE_PERF_LMSTS   = 49, /**< Number of local memory store issues */
    CVMX_CORE_PERF_IOSTS   = 50, /**< Number of I/O store issues */
    CVMX_CORE_PERF_IOBDMA  = 51, /**< Number of IOBDMAs */
    CVMX_CORE_PERF_DTLB    = 53, /**< Number of dstream TLB refill, invalid, or modified exceptions */
    CVMX_CORE_PERF_DTLBAD  = 54, /**< Number of dstream TLB address errors */
    CVMX_CORE_PERF_ITLB    = 55, /**< Number of istream TLB refill, invalid, or address error exceptions */
    CVMX_CORE_PERF_SYNC    = 56, /**< Number of SYNC stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_SYNCIOB = 57, /**< Number of SYNCIOBDMA stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
    CVMX_CORE_PERF_SYNCW   = 58, /**< Number of SYNCWs */
    CVMX_CORE_PERF_MAX           /**< This not a counter, just a marker for the highest number */
} cvmx_core_perf_t;
+
/**
 * Bit description of the COP0 performance counter control register.
 *
 * NOTE(review): the bitfield order (MSB-first as listed) assumes the
 * big-endian bitfield allocation used by the MIPS/Octeon toolchain -
 * confirm before using on any other target.
 */
typedef union
{
    uint32_t u32;
    struct
    {
        uint32_t m : 1; /**< Set to 1 for sel 0 and 0 for sel 2, indicating there are two performance counters */
        uint32_t w : 1; /**< Set to 1 indicating counters are 64 bit */
        uint32_t reserved_11_29 :19;
        cvmx_core_perf_t event : 6; /**< Selects the event to be counted by the corresponding Counter Register */
        uint32_t ie : 1; /**< Count in interrupt context */
        uint32_t u : 1; /**< Count in user mode */
        uint32_t s : 1; /**< Count in supervisor mode */
        uint32_t k : 1; /**< Count in kernel mode */
        uint32_t ex : 1; /**< Count in exception context */
    } s;
} cvmx_core_perf_control_t;
+
/**
 * TLB page mask values, written directly to the COP0 PageMask register by
 * cvmx_core_add_wired_tlb_entry().
 *
 * NOTE(review): the mask starts at bit 11, consistent with the alignment
 * checks in cvmx_core_add_fixed_tlb_mapping_bits() (which OR in 0x7ff,
 * i.e. 2KB even/odd sub-pages) - confirm against the Octeon HRM PageMask
 * description.
 */
typedef enum {
    CVMX_TLB_PAGEMASK_4K = 0x3 << 11,
    CVMX_TLB_PAGEMASK_16K = 0xF << 11,
    CVMX_TLB_PAGEMASK_64K = 0x3F << 11,
    CVMX_TLB_PAGEMASK_256K = 0xFF << 11,
    CVMX_TLB_PAGEMASK_1M = 0x3FF << 11,
    CVMX_TLB_PAGEMASK_4M = 0xFFF << 11,
    CVMX_TLB_PAGEMASK_16M = 0x3FFF << 11,
    CVMX_TLB_PAGEMASK_64M = 0xFFFF << 11,
    CVMX_TLB_PAGEMASK_256M = 0x3FFFF << 11,
} cvmx_tlb_pagemask_t;
+
+
+int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask);
+
+
+int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
+int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_CORE_H__ */
diff --git a/cvmx-coremask.c b/cvmx-coremask.c
new file mode 100644
index 000000000000..5574c83b66b7
--- /dev/null
+++ b/cvmx-coremask.c
@@ -0,0 +1,132 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on bitmap of cores. Coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-spinlock.h"
+#include "cvmx-coremask.h"
+
+
+#define CVMX_COREMASK_MAX_SYNCS 20 /* maximum number of coremasks for barrier sync */
+
/**
 * This structure defines the private state maintained by coremask module.
 *
 * It lives in CVMX_SHARED memory so every core observes the same barrier
 * bookkeeping.  All fields are accessed under 'lock'; only the 'exit'
 * generation counters are additionally polled outside the lock, hence
 * they are volatile.
 */
CVMX_SHARED static struct {

    cvmx_spinlock_t lock; /**< mutex spinlock */

    struct {

        unsigned int coremask; /**< coremask specified for barrier */
        unsigned int checkin; /**< bitmask of cores checking in */
        volatile unsigned int exit; /**< variable to poll for exit condition */

    } s[CVMX_COREMASK_MAX_SYNCS];

} state = {

    { CVMX_SPINLOCK_UNLOCKED_VAL },

    { { 0, 0, 0 } },
};
+
+
/**
 * Wait (stall) until all cores in the given coremask have reached this point
 * in the program execution before proceeding.
 *
 * Each distinct coremask value is assigned its own slot in the shared state
 * table on first use.  A core ORs its bit into the slot's check-in mask
 * under the lock; the last core to arrive clears the check-in mask and
 * advances the slot's 'exit' generation counter, releasing every spinning
 * core at once.  Slots are never recycled, so at most
 * CVMX_COREMASK_MAX_SYNCS distinct coremasks may ever be used.
 *
 * @param coremask the group of cores performing the barrier sync
 *
 */
void cvmx_coremask_barrier_sync(unsigned int coremask)
{
    int i;
    unsigned int target;

    assert(coremask != 0);

    cvmx_spinlock_lock(&state.lock);

    for (i = 0; i < CVMX_COREMASK_MAX_SYNCS; i++) {

        if (state.s[i].coremask == 0) {
            /* end of existing coremask list, create new entry, fall-thru */
            state.s[i].coremask = coremask;
        }

        if (state.s[i].coremask == coremask) {

            /* The barrier generation this core must wait for. */
            target = state.s[i].exit + 1;   /* wrap-around at 32b */

            state.s[i].checkin |= cvmx_coremask_core(cvmx_get_core_num());
            if (state.s[i].checkin == coremask) {
                /* Last core in: reset check-in for the next barrier use
                   and publish the new generation to release everyone. */
                state.s[i].checkin = 0;
                state.s[i].exit = target;   /* signal exit condition */
            }
            cvmx_spinlock_unlock(&state.lock);

            /* Busy-wait outside the lock on the volatile generation
               counter until the last core checks in. */
            while (state.s[i].exit != target)
                ;

            return;
        }
    }

    /* error condition - coremask array overflowed */
    cvmx_spinlock_unlock(&state.lock);
    assert(0);
}
diff --git a/cvmx-coremask.h b/cvmx-coremask.h
new file mode 100644
index 000000000000..132e46221102
--- /dev/null
+++ b/cvmx-coremask.h
@@ -0,0 +1,161 @@
+/***********************license start***************
+ * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+
+
+
+
+
+/**
+ * @file
+ *
+ * Module to support operations on bitmap of cores. Coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#include "cvmx-asm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * coremask is simply unsigned int (32 bits).
+ *
+ * NOTE: supports up to 32 cores maximum.
+ *
+ * union of coremasks is simply bitwise-or.
+ * intersection of coremasks is simply bitwise-and.
+ *
+ */
+
+#define CVMX_COREMASK_MAX 0xFFFFFFFFu /* maximum supported mask */
+
+
/**
 * Compute the coremask containing exactly one core.
 *
 * @param core_id The core ID (0-31)
 *
 * @return coremask with only the bit for core_id set
 *
 */
static inline unsigned int cvmx_coremask_core(unsigned int core_id)
{
    unsigned int mask = 1u;

    mask <<= core_id;
    return mask;
}
+
+/**
+ * Compute coremask for num_cores cores starting with core 0.
+ *
+ * @param num_cores number of cores
+ *
+ * @return coremask for num_cores cores
+ *
+ */
+static inline unsigned int cvmx_coremask_numcores(unsigned int num_cores)
+{
+ return (CVMX_COREMASK_MAX >> (32 - num_cores));
+}
+
+/**
+ * Compute coremask for a range of cores from core low to core high.
+ *
+ * @param low first core in the range
+ * @param high last core in the range
+ *
+ * @return coremask for the range of cores
+ *
+ */
+static inline unsigned int cvmx_coremask_range(unsigned int low, unsigned int high)
+{
+ return ((CVMX_COREMASK_MAX >> (31 - high + low)) << low);
+}
+
+
/**
 * Test to see if the current core is a member of coremask.
 *
 * @param coremask the coremask to test against
 *
 * @return 1 if current core is a member of coremask, 0 otherwise
 *
 */
static inline int cvmx_coremask_is_member(unsigned int coremask)
{
    unsigned int self = cvmx_coremask_core(cvmx_get_core_num());

    return (self & coremask) != 0;
}
+
/**
 * Test to see if the current core is the first (lowest-numbered) core in
 * coremask.
 *
 * @param coremask the coremask to test against
 *
 * @return 1 if current core is the first core in the coremask, 0 otherwise
 *
 */
static inline int cvmx_coremask_first_core(unsigned int coremask)
{
    unsigned int me = cvmx_get_core_num();

    if (!cvmx_coremask_is_member(coremask))
        return 0;
    /* Core 0 is trivially first; otherwise none of the lower-numbered
       cores may appear in the mask. */
    return (me == 0) || ((cvmx_coremask_numcores(me) & coremask) == 0);
}
+
+/**
+ * Wait (stall) until all cores in the given coremask has reached this point
+ * in the program execution before proceeding.
+ *
+ * @param coremask the group of cores performing the barrier sync
+ *
+ */
+extern void cvmx_coremask_barrier_sync(unsigned int coremask);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CVMX_COREMASK_H__ */
diff --git a/cvmx-csr-addresses.h b/cvmx-csr-addresses.h
new file mode 100644
index 000000000000..e9ba13ef6eb6
--- /dev/null
+++ b/cvmx-csr-addresses.h
@@ -0,0 +1,15490 @@
+/***********************license start***************
+ * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+ * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+ * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
+ * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
+ * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ *
+ *
+ * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ *
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and for
+ * Octeon. Include cvmx-csr.h instead of this file directly.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ * <hr>$Revision: 41586 $<hr>
+ *
+ */
+#ifndef __CVMX_CSR_ADDRESSES_H__
+#define __CVMX_CSR_ADDRESSES_H__
+
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+#include "cvmx-warn.h"
+#endif
+
/* Auto-generated CSR address accessors - do not edit by hand (see the file
   header).  Each fixed-address register gets a CVMX_<NAME> macro expanding
   to an inline function that returns the register's I/O address; when
   CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled, the function also warns if
   the register does not exist on the running chip model (the AGL block is
   only present on CN52XX/CN56XX). */
#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
        cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
}

#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
        cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
}

#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
        cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
#endif
    return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
}

#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
        cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
#endif
    return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
}
+
/* Auto-generated per-port CSR address accessors - do not edit by hand.
   These AGL (management Ethernet) registers exist once per port: only
   port 0 on CN56XX, ports 0-1 on CN52XX.  The address is computed as
   base + (offset & 1) * 2048; out-of-range offsets are reported when
   CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + (offset&1)*2048;
}

static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
    if (!(
          (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
          (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
        cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
#endif
    return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + (offset&1)*2048;
}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + (offset&1)*8;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + (offset&1)*8;
+}
+
+static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + (offset&1)*8;
+}
+
+/*
+ * Shared (non-per-MAC) AGL GMX RX status registers, the per-MAC SMAC
+ * register, and the STAT_BP register.  The *_FUNC() singletons take no
+ * argument; the companion #define lets callers use the bare register
+ * name as if it were a constant.  The model check covers CN52XX/CN56XX
+ * only and, as elsewhere in this generated file, is warn-only.
+ */
+#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
+}
+
+#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
+static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
+}
+
+/* Per-MAC source-MAC-address register; 2048-byte stride like the RXX/TXX banks. */
+static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + (offset&1)*2048;
+}
+
+#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
+}
+
+/*
+ * AGL GMX per-MAC TX CSR address helpers; same generated pattern as
+ * the RX side: `offset` 0 on CN56XX, 0..1 on CN52XX, warn-only
+ * validation under CVMX_ENABLE_CSR_ADDRESS_CHECKING, and a 2048-byte
+ * per-MAC register-bank stride selected by `offset&1`.  STAT0..STAT9
+ * are ten consecutive 8-byte statistics registers (0x280..0x2C8).
+ */
+static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + (offset&1)*2048;
+}
+
+/*
+ * AGL GMX TX registers shared by both MACs (no offset argument);
+ * each is a #define over a zero-argument *_FUNC() helper.  Valid on
+ * CN52XX/CN56XX only; the model check is warn-only.
+ */
+#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
+}
+
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
+}
+
+#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
+}
+
+#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
+}
+
+#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
+}
+
+#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
+}
+
+#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
+}
+
+#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
+}
+
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
+}
+
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
+static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
+}
+
+/*
+ * ASX (RGMII/GMII pad-interface) CSR address helpers.
+ *
+ * Registers valid only on single-ASX parts mask with `block_id&0`
+ * (the term is always 0 -- block 0 is the only legal block, and the
+ * dead multiply is a generator artifact, not a bug).  Dual-block
+ * parts (CN38XX/CN58XX) mask with `block_id&1` and use a 0x8000000
+ * byte block stride.  Two-argument helpers combine a per-port
+ * `offset&3` with the block selector, both scaled by the 8-byte
+ * register size.  All range checks are warn-only.
+ */
+#define CVMX_ASX0_DBG_DATA_DRV CVMX_ASX0_DBG_DATA_DRV_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_DRV_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_DRV not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000208ull);
+}
+
+#define CVMX_ASX0_DBG_DATA_ENABLE CVMX_ASX0_DBG_DATA_ENABLE_FUNC()
+static inline uint64_t CVMX_ASX0_DBG_DATA_ENABLE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_ASX0_DBG_DATA_ENABLE not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000200ull);
+}
+
+static inline uint64_t CVMX_ASXX_GMII_RX_CLK_SET(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_CLK_SET(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000180ull) + (block_id&0)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_GMII_RX_DAT_SET(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_GMII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000188ull) + (block_id&0)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_INT_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000018ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_INT_REG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_INT_REG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000010ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_MII_RX_DAT_SET(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_ASXX_MII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000190ull) + (block_id&0)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_PRT_LOOP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_PRT_LOOP(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000040ull) + (block_id&1)*0x8000000ull;
+}
+
+/* RLD_* pad-compensation registers exist only on the dual-ASX CN38XX/CN58XX
+ * (FCRAM_MODE is CN38XX-only). */
+static inline uint64_t CVMX_ASXX_RLD_BYPASS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000248ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_BYPASS_SETTING(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_BYPASS_SETTING(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000250ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_COMP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_COMP(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000220ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_DATA_DRV(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_DATA_DRV(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000218ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_FCRAM_MODE(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_FCRAM_MODE(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000210ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_NCTL_STRONG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000230ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_NCTL_WEAK(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_NCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000240ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_PCTL_STRONG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_STRONG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000228ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_PCTL_WEAK(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_PCTL_WEAK(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000238ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RLD_SETTING(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RLD_SETTING(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000258ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Two-argument form: per-port offset (0..2 or 0..3 depending on model)
+ * plus block selector; (block_id&1)*0x1000000ull*8 == 0x8000000 block stride. */
+static inline uint64_t CVMX_ASXX_RX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_RX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000020ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_ASXX_RX_PRT_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000000ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Wake-on-LAN registers: CN38XX only. */
+static inline uint64_t CVMX_ASXX_RX_WOL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000100ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RX_WOL_MSK(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_MSK(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000108ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RX_WOL_POWOK(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_POWOK(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000118ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_RX_WOL_SIG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_RX_WOL_SIG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000110ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_TX_CLK_SETX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000048ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_ASXX_TX_COMP_BYP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_COMP_BYP(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000068ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_ASXX_TX_HI_WATERX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_ASXX_TX_HI_WATERX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000080ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_ASXX_TX_PRT_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_ASXX_TX_PRT_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800B0000008ull) + (block_id&1)*0x8000000ull;
+}
+
+#define CVMX_CIU_BIST CVMX_CIU_BIST_FUNC()
+static inline uint64_t CVMX_CIU_BIST_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000730ull);
+}
+
+#define CVMX_CIU_DINT CVMX_CIU_DINT_FUNC()
+static inline uint64_t CVMX_CIU_DINT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000720ull);
+}
+
+#define CVMX_CIU_FUSE CVMX_CIU_FUSE_FUNC()
+static inline uint64_t CVMX_CIU_FUSE_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000728ull);
+}
+
+#define CVMX_CIU_GSTOP CVMX_CIU_GSTOP_FUNC()
+static inline uint64_t CVMX_CIU_GSTOP_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000710ull);
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN0(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN0(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000200ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN0_W1C(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1C(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000002200ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN0_W1S(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN0_W1S(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000006200ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN1(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN1(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000208ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN1_W1C(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1C(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000002208ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN1_W1S(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32)))))
+ cvmx_warn("CVMX_CIU_INTX_EN1_W1S(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000006208ull) + (offset&63)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_0(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000C80ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1C(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1C(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000002C80ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_0_W1S(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_0_W1S(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000006C80ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_1(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000C88ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1C(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1C(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000002C88ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_EN4_1_W1S(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_CIU_INTX_EN4_1_W1S(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000006C88ull) + (offset&15)*16;
+}
+
+static inline uint64_t CVMX_CIU_INTX_SUM0(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 23) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7) || (offset == 32)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM0(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000000ull) + (offset&63)*8;
+}
+
+static inline uint64_t CVMX_CIU_INTX_SUM4(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_INTX_SUM4(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000C00ull) + (offset&15)*8;
+}
+
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_INT_SUM1_FUNC()
+static inline uint64_t CVMX_CIU_INT_SUM1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000108ull);
+}
+
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_MBOX_CLRX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset&15)*8;
+}
+
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_MBOX_SETX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset&15)*8;
+}
+
+#define CVMX_CIU_NMI CVMX_CIU_NMI_FUNC()
+static inline uint64_t CVMX_CIU_NMI_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000718ull);
+}
+
+#define CVMX_CIU_PCI_INTA CVMX_CIU_PCI_INTA_FUNC()
+static inline uint64_t CVMX_CIU_PCI_INTA_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000750ull);
+}
+
+#define CVMX_CIU_PP_DBG CVMX_CIU_PP_DBG_FUNC()
+static inline uint64_t CVMX_CIU_PP_DBG_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000708ull);
+}
+
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_PP_POKEX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset&15)*8;
+}
+
+#define CVMX_CIU_PP_RST CVMX_CIU_PP_RST_FUNC()
+static inline uint64_t CVMX_CIU_PP_RST_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000700ull);
+}
+
+#define CVMX_CIU_QLM_DCOK CVMX_CIU_QLM_DCOK_FUNC()
+static inline uint64_t CVMX_CIU_QLM_DCOK_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_QLM_DCOK not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000760ull);
+}
+
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_QLM_JTGC_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGC_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGC not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000768ull);
+}
+
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_QLM_JTGD_FUNC()
+static inline uint64_t CVMX_CIU_QLM_JTGD_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_QLM_JTGD not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000770ull);
+}
+
+#define CVMX_CIU_SOFT_BIST CVMX_CIU_SOFT_BIST_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_BIST_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000738ull);
+}
+
+#define CVMX_CIU_SOFT_PRST CVMX_CIU_SOFT_PRST_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000748ull);
+}
+
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_SOFT_PRST1_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_PRST1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_CIU_SOFT_PRST1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000758ull);
+}
+
+#define CVMX_CIU_SOFT_RST CVMX_CIU_SOFT_RST_FUNC()
+static inline uint64_t CVMX_CIU_SOFT_RST_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000740ull);
+}
+
+static inline uint64_t CVMX_CIU_TIMX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_TIMX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000480ull) + (offset&3)*8;
+}
+
+static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_CIU_WDOGX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset&15)*8;
+}
+
+#define CVMX_DBG_DATA CVMX_DBG_DATA_FUNC()
+static inline uint64_t CVMX_DBG_DATA_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DBG_DATA not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011F00000001E8ull);
+}
+
+#define CVMX_DFA_BST0 CVMX_DFA_BST0_FUNC()
+static inline uint64_t CVMX_DFA_BST0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_BST0 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800300007F0ull);
+}
+
+#define CVMX_DFA_BST1 CVMX_DFA_BST1_FUNC()
+static inline uint64_t CVMX_DFA_BST1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_BST1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800300007F8ull);
+}
+
+#define CVMX_DFA_CFG CVMX_DFA_CFG_FUNC()
+static inline uint64_t CVMX_DFA_CFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_CFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000000ull);
+}
+
+#define CVMX_DFA_DBELL CVMX_DFA_DBELL_FUNC()
+static inline uint64_t CVMX_DFA_DBELL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_DBELL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001370000000000ull);
+}
+
+#define CVMX_DFA_DDR2_ADDR CVMX_DFA_DDR2_ADDR_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_ADDR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_ADDR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000210ull);
+}
+
+#define CVMX_DFA_DDR2_BUS CVMX_DFA_DDR2_BUS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_BUS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_BUS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000080ull);
+}
+
+#define CVMX_DFA_DDR2_CFG CVMX_DFA_DDR2_CFG_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_CFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_CFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000208ull);
+}
+
+#define CVMX_DFA_DDR2_COMP CVMX_DFA_DDR2_COMP_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_COMP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_COMP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000090ull);
+}
+
+#define CVMX_DFA_DDR2_EMRS CVMX_DFA_DDR2_EMRS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_EMRS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_EMRS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000268ull);
+}
+
+#define CVMX_DFA_DDR2_FCNT CVMX_DFA_DDR2_FCNT_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_FCNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_FCNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000078ull);
+}
+
+#define CVMX_DFA_DDR2_MRS CVMX_DFA_DDR2_MRS_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_MRS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_MRS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000260ull);
+}
+
+#define CVMX_DFA_DDR2_OPT CVMX_DFA_DDR2_OPT_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_OPT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_OPT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000070ull);
+}
+
+#define CVMX_DFA_DDR2_PLL CVMX_DFA_DDR2_PLL_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_PLL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_PLL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000088ull);
+}
+
+#define CVMX_DFA_DDR2_TMG CVMX_DFA_DDR2_TMG_FUNC()
+static inline uint64_t CVMX_DFA_DDR2_TMG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_DDR2_TMG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000218ull);
+}
+
+#define CVMX_DFA_DIFCTL CVMX_DFA_DIFCTL_FUNC()
+static inline uint64_t CVMX_DFA_DIFCTL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_DIFCTL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001370600000000ull);
+}
+
+#define CVMX_DFA_DIFRDPTR CVMX_DFA_DIFRDPTR_FUNC()
+static inline uint64_t CVMX_DFA_DIFRDPTR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_DIFRDPTR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001370200000000ull);
+}
+
+#define CVMX_DFA_ECLKCFG CVMX_DFA_ECLKCFG_FUNC()
+static inline uint64_t CVMX_DFA_ECLKCFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_DFA_ECLKCFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000200ull);
+}
+
+#define CVMX_DFA_ERR CVMX_DFA_ERR_FUNC()
+static inline uint64_t CVMX_DFA_ERR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_ERR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000028ull);
+}
+
+#define CVMX_DFA_MEMCFG0 CVMX_DFA_MEMCFG0_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG0 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000008ull);
+}
+
+#define CVMX_DFA_MEMCFG1 CVMX_DFA_MEMCFG1_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000010ull);
+}
+
+#define CVMX_DFA_MEMCFG2 CVMX_DFA_MEMCFG2_FUNC()
+static inline uint64_t CVMX_DFA_MEMCFG2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMCFG2 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000060ull);
+}
+
+#define CVMX_DFA_MEMFADR CVMX_DFA_MEMFADR_FUNC()
+static inline uint64_t CVMX_DFA_MEMFADR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMFADR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000030ull);
+}
+
+#define CVMX_DFA_MEMFCR CVMX_DFA_MEMFCR_FUNC()
+static inline uint64_t CVMX_DFA_MEMFCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMFCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000038ull);
+}
+
+#define CVMX_DFA_MEMRLD CVMX_DFA_MEMRLD_FUNC()
+static inline uint64_t CVMX_DFA_MEMRLD_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_MEMRLD not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000018ull);
+}
+
+#define CVMX_DFA_NCBCTL CVMX_DFA_NCBCTL_FUNC()
+static inline uint64_t CVMX_DFA_NCBCTL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_NCBCTL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000020ull);
+}
+
+#define CVMX_DFA_RODT_COMP_CTL CVMX_DFA_RODT_COMP_CTL_FUNC()
+static inline uint64_t CVMX_DFA_RODT_COMP_CTL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_RODT_COMP_CTL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000068ull);
+}
+
+#define CVMX_DFA_SBD_DBG0 CVMX_DFA_SBD_DBG0_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG0 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000040ull);
+}
+
+#define CVMX_DFA_SBD_DBG1 CVMX_DFA_SBD_DBG1_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000048ull);
+}
+
+#define CVMX_DFA_SBD_DBG2 CVMX_DFA_SBD_DBG2_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG2 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000050ull);
+}
+
+#define CVMX_DFA_SBD_DBG3 CVMX_DFA_SBD_DBG3_FUNC()
+static inline uint64_t CVMX_DFA_SBD_DBG3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_DFA_SBD_DBG3 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180030000058ull);
+}
+
+#define CVMX_FPA_BIST_STATUS CVMX_FPA_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_FPA_BIST_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800280000E8ull);
+}
+
+#define CVMX_FPA_CTL_STATUS CVMX_FPA_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_FPA_CTL_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180028000050ull);
+}
+
+#define CVMX_FPA_FPF0_MARKS CVMX_FPA_FPF0_MARKS_FUNC()
+static inline uint64_t CVMX_FPA_FPF0_MARKS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_FPA_FPF0_MARKS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180028000000ull);
+}
+
+#define CVMX_FPA_FPF0_SIZE CVMX_FPA_FPF0_SIZE_FUNC()
+static inline uint64_t CVMX_FPA_FPF0_SIZE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_FPA_FPF0_SIZE not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180028000058ull);
+}
+
+#define CVMX_FPA_FPF1_MARKS CVMX_FPA_FPFX_MARKS(1)
+#define CVMX_FPA_FPF2_MARKS CVMX_FPA_FPFX_MARKS(2)
+#define CVMX_FPA_FPF3_MARKS CVMX_FPA_FPFX_MARKS(3)
+#define CVMX_FPA_FPF4_MARKS CVMX_FPA_FPFX_MARKS(4)
+#define CVMX_FPA_FPF5_MARKS CVMX_FPA_FPFX_MARKS(5)
+#define CVMX_FPA_FPF6_MARKS CVMX_FPA_FPFX_MARKS(6)
+#define CVMX_FPA_FPF7_MARKS CVMX_FPA_FPFX_MARKS(7)
+static inline uint64_t CVMX_FPA_FPFX_MARKS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7))))))
+ cvmx_warn("CVMX_FPA_FPFX_MARKS(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180028000008ull) + (offset&7)*8 - 8*1;
+}
+
+static inline uint64_t CVMX_FPA_FPFX_SIZE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7))))))
+ cvmx_warn("CVMX_FPA_FPFX_SIZE(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180028000060ull) + (offset&7)*8 - 8*1;
+}
+
+#define CVMX_FPA_INT_ENB CVMX_FPA_INT_ENB_FUNC()
+static inline uint64_t CVMX_FPA_INT_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180028000048ull);
+}
+
+#define CVMX_FPA_INT_SUM CVMX_FPA_INT_SUM_FUNC()
+static inline uint64_t CVMX_FPA_INT_SUM_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180028000040ull);
+}
+
+#define CVMX_FPA_QUE0_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(0)
+#define CVMX_FPA_QUE1_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(1)
+#define CVMX_FPA_QUE2_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(2)
+#define CVMX_FPA_QUE3_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(3)
+#define CVMX_FPA_QUE4_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(4)
+#define CVMX_FPA_QUE5_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(5)
+#define CVMX_FPA_QUE6_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(6)
+#define CVMX_FPA_QUE7_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(7)
+static inline uint64_t CVMX_FPA_QUEX_AVAILABLE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_QUEX_AVAILABLE(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180028000098ull) + (offset&7)*8;
+}
+
+static inline uint64_t CVMX_FPA_QUEX_PAGE_INDEX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_FPA_QUEX_PAGE_INDEX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800280000F0ull) + (offset&7)*8;
+}
+
+#define CVMX_FPA_QUE_ACT CVMX_FPA_QUE_ACT_FUNC()
+static inline uint64_t CVMX_FPA_QUE_ACT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180028000138ull);
+}
+
+#define CVMX_FPA_QUE_EXP CVMX_FPA_QUE_EXP_FUNC()
+static inline uint64_t CVMX_FPA_QUE_EXP_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180028000130ull);
+}
+
+#define CVMX_FPA_WART_CTL CVMX_FPA_WART_CTL_FUNC()
+static inline uint64_t CVMX_FPA_WART_CTL_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800280000D8ull);
+}
+
+#define CVMX_FPA_WART_STATUS CVMX_FPA_WART_STATUS_FUNC()
+static inline uint64_t CVMX_FPA_WART_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800280000E0ull);
+}
+
+/*
+ * GMX per-interface CSRs, parameterised only by block_id (one GMX
+ * interface every 0x8000000 bytes).  When CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ * is enabled, block_id is validated against the interfaces the running
+ * chip model provides and cvmx_warn() reports out-of-range values; the
+ * address is still formed with block_id masked to its legal bit, so an
+ * invalid argument cannot index outside the register window.
+ */
+static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_BAD_REG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_BIST(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Only generated for CN56XX/CN52XX (see the model check below). */
+static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_CLK_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Only generated for CN56XX/CN52XX (see the model check below). */
+static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_HG2_CONTROL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_INF_MODE(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_NXA_ADR(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id&1)*0x8000000ull;
+}
+
+/*
+ * GMX per-port CSRs: addressed by (offset = port within the interface,
+ * block_id = interface).  Both arguments are masked to their legal bit
+ * widths before use, so an out-of-range value (already reported via
+ * cvmx_warn when checking is enabled) still yields an in-window address.
+ */
+static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ /* (offset&0) is intentional: only port 0 carries this register on the
+  * supported models (see the check above), so offset is masked to zero. */
+ return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((offset&0) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_PRTX_CFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/*
+ * GMX RX address-filter CAM registers, per port (offset) and per
+ * interface (block_id): CAM0..CAM5 plus the CAM enable and address
+ * control registers.  All eight share the same layout: port stride of
+ * 2048 bytes, interface stride of 0x10000*2048 bytes, with offset
+ * masked to 2 bits and block_id to 1 bit before address formation.
+ */
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_ADR_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/*
+ * GMX per-port RX frame-handling CSRs (decision, frame check/control,
+ * min/max frame sizes, inter-frame gap).  Same port/interface addressing
+ * as the CAM registers: offset masked to 2 bits, block_id to 1 bit,
+ * 2048-byte port stride.
+ */
+static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_DECISION(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_CHK(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* FRM_MAX/FRM_MIN are only generated for CN30XX/CN38XX/CN31XX/CN58XX
+ * (see the reduced model checks below). */
+static inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000030ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000028ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_IFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/*
+ * GMX per-port RX interrupt, jabber, pause-drop and inband-status CSRs.
+ * Same port/interface addressing as the other RXX registers.  Note that
+ * PAUSE_DROP_TIME and RX_INBND carry reduced, model-specific support
+ * lists (see their checks).
+ */
+static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_INT_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_INT_REG(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_JABBER(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000060ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/*
+ * GMX per-port RX statistics CSRs (control, octet and packet counters,
+ * plus the DMAC-filtered and dropped variants).  All use the common
+ * RXX addressing: offset masked to 2 bits, block_id to 1 bit, 2048-byte
+ * port stride, with the full seven-model support check.
+ */
+static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/*
+ * Remaining GMX per-port RX statistics counters and the user-defined
+ * data skip register.  Same addressing scheme and model support as the
+ * statistics registers above.
+ */
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RXX_UDD_SKP(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RX_BP_DROPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+ cvmx_warn("CVMX_GMXX_RX_BP_OFFX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_BP_ON(offset).
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_RX_BP_ONX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset&3) + (block_id&1)*0x1000000ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_HG2_STATUS.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_HG2_STATUS(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_PASS_EN.
+   Present only on CN38XX/CN58XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+		cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_PASS_MAP(offset), offset 0..15.
+   Present only on CN38XX/CN58XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
+		cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((offset&15) + (block_id&1)*0x1000000ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_PRTS.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_PRTS(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_PRT_INFO.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_PRT_INFO(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_TX_STATUS.
+   Only block 0 exists on the supported models (CN30XX/CN31XX/CN50XX). */
+static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
+#endif
+	/* NOTE(review): (block_id&0) is always 0 — deliberate generator artifact
+	   since only block 0 is valid here; the stride term is dead. */
+	return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id&0)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_XAUI_BAD_COL.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_RX_XAUI_CTL.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_RX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_SMAC(offset).
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_SMACX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_STAT_BP.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_STAT_BP(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_APPEND.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_APPEND(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_BURST.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_BURST(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_CBFC_XOFF.
+   Only offset 0 is valid (CN52XX/CN56XX); (offset&0) is always 0 — generator artifact. */
+static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((offset&0) + (block_id&1)*0x1000000ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_CBFC_XON.
+   Only offset 0 is valid (CN52XX/CN56XX); (offset&0) is always 0 — generator artifact. */
+static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_CBFC_XON(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((offset&0) + (block_id&1)*0x1000000ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_CLK.
+   Not present on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
+		cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000208ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_CTL.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_MIN_PKT.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_MIN_PKT(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_PAUSE_PKT_INTERVAL.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_PAUSE_PKT_TIME.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_PAUSE_TOGO.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_PAUSE_ZERO.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_SGMII_CTL.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_SLOT.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_SLOT(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_SOFT_PAUSE.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT0.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT0(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT1.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT1(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT2.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT2(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT3.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT3(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT4.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT4(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT5.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT5(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT6.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT6(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT7.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT7(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT8.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT8(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STAT9.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STAT9(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_STATS_CTL.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX(offset)_THRESH.
+   The model check below is warn-only; the masks merely wrap out-of-range args. */
+static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TXX_THRESH(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset&3) + (block_id&1)*0x10000ull)*2048;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_BP.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_BP(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_CLK_MSK(offset), offset 0..1.
+   Present only on CN30XX/CN50XX (block 0 only). */
+static inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0))))))
+		cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+	/* NOTE(review): (block_id&0)*0x0ull is always 0 — generator artifact for a
+	   register with a single valid block; the block stride term is dead. */
+	return CVMX_ADD_IO_SEG(0x0001180008000780ull) + ((offset&1) + (block_id&0)*0x0ull)*8;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_COL_ATTEMPT.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_CORRUPT.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_CORRUPT(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_HG2_REG1.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_HG2_REG1(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_HG2_REG2.
+   Present only on CN52XX/CN56XX; the model check below is warn-only. */
+static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_HG2_REG2(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id&1)*0x8000000ull;
+}
+
+/* Generated accessor: physical address of GMX(block_id)_TX_IFG.
+   The model check below is warn-only; the mask merely wraps an out-of-range block_id. */
+static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+	if (!(
+	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+		cvmx_warn("CVMX_GMXX_TX_IFG(%lu) is invalid on this chip\n", block_id);
+#endif
+	return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_INT_EN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_INT_REG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_JAM(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_LFSR(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_OVR_BP(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_PRTS(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000680ull) + ((offset&31) + (block_id&1)*0x1000000ull)*8;
+}
+
+static inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_TX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id&1)*0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id&1)*0x8000000ull;
+}
+
+/* --- GPIO CSR address helpers (auto-generated) -------------------------- */
+/* Single GPIO block per chip; indexed helpers mask the pin index before    */
+/* forming the address, and range violations only warn (address is still   */
+/* returned).  Do not hand-edit: regenerated from the register database.   */
+static inline uint64_t CVMX_GPIO_BIT_CFGX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15)))))
+ cvmx_warn("CVMX_GPIO_BIT_CFGX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000800ull) + (offset&15)*8;
+}
+
+#define CVMX_GPIO_BOOT_ENA CVMX_GPIO_BOOT_ENA_FUNC()
+static inline uint64_t CVMX_GPIO_BOOT_ENA_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_GPIO_BOOT_ENA not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00010700000008A8ull);
+}
+
+static inline uint64_t CVMX_GPIO_CLK_GENX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_GPIO_CLK_GENX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00010700000008C0ull) + (offset&3)*8;
+}
+
+#define CVMX_GPIO_DBG_ENA CVMX_GPIO_DBG_ENA_FUNC()
+static inline uint64_t CVMX_GPIO_DBG_ENA_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_GPIO_DBG_ENA not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00010700000008A0ull);
+}
+
+#define CVMX_GPIO_INT_CLR CVMX_GPIO_INT_CLR_FUNC()
+static inline uint64_t CVMX_GPIO_INT_CLR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000898ull);
+}
+
+#define CVMX_GPIO_RX_DAT CVMX_GPIO_RX_DAT_FUNC()
+static inline uint64_t CVMX_GPIO_RX_DAT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000880ull);
+}
+
+#define CVMX_GPIO_TX_CLR CVMX_GPIO_TX_CLR_FUNC()
+static inline uint64_t CVMX_GPIO_TX_CLR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000890ull);
+}
+
+#define CVMX_GPIO_TX_SET CVMX_GPIO_TX_SET_FUNC()
+static inline uint64_t CVMX_GPIO_TX_SET_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001070000000888ull);
+}
+
+/* XBIT covers extended pins 16-23; the "- 8*16" rebases the masked index so
+ * offset 16 maps to the 0x900 base. */
+static inline uint64_t CVMX_GPIO_XBIT_CFGX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset >= 16) && (offset <= 23)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset >= 16) && (offset <= 23)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset >= 16) && (offset <= 23))))))
+ cvmx_warn("CVMX_GPIO_XBIT_CFGX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000000900ull) + (offset&31)*8 - 8*16;
+}
+
+/* --- IOB (I/O Bridge) CSR address helpers (auto-generated) -------------- */
+/* Non-indexed registers: a #define forwards the bare name to a _FUNC()     */
+/* helper so model checking can run when address checking is enabled.       */
+/* Do not hand-edit: regenerated from the register database.                */
+#define CVMX_IOB_BIST_STATUS CVMX_IOB_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_IOB_BIST_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F00007F8ull);
+}
+
+#define CVMX_IOB_CTL_STATUS CVMX_IOB_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_IOB_CTL_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000050ull);
+}
+
+#define CVMX_IOB_DWB_PRI_CNT CVMX_IOB_DWB_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_DWB_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_DWB_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000028ull);
+}
+
+#define CVMX_IOB_FAU_TIMEOUT CVMX_IOB_FAU_TIMEOUT_FUNC()
+static inline uint64_t CVMX_IOB_FAU_TIMEOUT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000000ull);
+}
+
+#define CVMX_IOB_I2C_PRI_CNT CVMX_IOB_I2C_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_I2C_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_I2C_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000010ull);
+}
+
+#define CVMX_IOB_INB_CONTROL_MATCH CVMX_IOB_INB_CONTROL_MATCH_FUNC()
+static inline uint64_t CVMX_IOB_INB_CONTROL_MATCH_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000078ull);
+}
+
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB CVMX_IOB_INB_CONTROL_MATCH_ENB_FUNC()
+static inline uint64_t CVMX_IOB_INB_CONTROL_MATCH_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000088ull);
+}
+
+#define CVMX_IOB_INB_DATA_MATCH CVMX_IOB_INB_DATA_MATCH_FUNC()
+static inline uint64_t CVMX_IOB_INB_DATA_MATCH_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000070ull);
+}
+
+#define CVMX_IOB_INB_DATA_MATCH_ENB CVMX_IOB_INB_DATA_MATCH_ENB_FUNC()
+static inline uint64_t CVMX_IOB_INB_DATA_MATCH_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000080ull);
+}
+
+#define CVMX_IOB_INT_ENB CVMX_IOB_INT_ENB_FUNC()
+static inline uint64_t CVMX_IOB_INT_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000060ull);
+}
+
+#define CVMX_IOB_INT_SUM CVMX_IOB_INT_SUM_FUNC()
+static inline uint64_t CVMX_IOB_INT_SUM_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000058ull);
+}
+
+#define CVMX_IOB_N2C_L2C_PRI_CNT CVMX_IOB_N2C_L2C_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_N2C_L2C_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_N2C_L2C_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000020ull);
+}
+
+#define CVMX_IOB_N2C_RSP_PRI_CNT CVMX_IOB_N2C_RSP_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_N2C_RSP_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_N2C_RSP_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000008ull);
+}
+
+#define CVMX_IOB_OUTB_COM_PRI_CNT CVMX_IOB_OUTB_COM_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_COM_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_OUTB_COM_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000040ull);
+}
+
+#define CVMX_IOB_OUTB_CONTROL_MATCH CVMX_IOB_OUTB_CONTROL_MATCH_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_CONTROL_MATCH_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000098ull);
+}
+
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB CVMX_IOB_OUTB_CONTROL_MATCH_ENB_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_CONTROL_MATCH_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F00000A8ull);
+}
+
+#define CVMX_IOB_OUTB_DATA_MATCH CVMX_IOB_OUTB_DATA_MATCH_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_DATA_MATCH_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000090ull);
+}
+
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB CVMX_IOB_OUTB_DATA_MATCH_ENB_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_DATA_MATCH_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F00000A0ull);
+}
+
+#define CVMX_IOB_OUTB_FPA_PRI_CNT CVMX_IOB_OUTB_FPA_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_FPA_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_OUTB_FPA_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000048ull);
+}
+
+#define CVMX_IOB_OUTB_REQ_PRI_CNT CVMX_IOB_OUTB_REQ_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_OUTB_REQ_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_OUTB_REQ_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000038ull);
+}
+
+#define CVMX_IOB_P2C_REQ_PRI_CNT CVMX_IOB_P2C_REQ_PRI_CNT_FUNC()
+static inline uint64_t CVMX_IOB_P2C_REQ_PRI_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_IOB_P2C_REQ_PRI_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F0000018ull);
+}
+
+#define CVMX_IOB_PKT_ERR CVMX_IOB_PKT_ERR_FUNC()
+static inline uint64_t CVMX_IOB_PKT_ERR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800F0000068ull);
+}
+
+#define CVMX_IOB_TO_CMB_CREDITS CVMX_IOB_TO_CMB_CREDITS_FUNC()
+static inline uint64_t CVMX_IOB_TO_CMB_CREDITS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_IOB_TO_CMB_CREDITS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800F00000B0ull);
+}
+
+/* --- IPD (Input Packet Data) CSR address helpers (auto-generated) ------- */
+/* Per-port and per-QoS registers mask their index before forming the       */
+/* address; some second-bank registers rebase with "- 8*36" so the first    */
+/* valid index lands on the bank's base address.  Range violations only     */
+/* warn.  Do not hand-edit: regenerated from the register database.         */
+#define CVMX_IPD_1ST_MBUFF_SKIP CVMX_IPD_1ST_MBUFF_SKIP_FUNC()
+static inline uint64_t CVMX_IPD_1ST_MBUFF_SKIP_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000000ull);
+}
+
+#define CVMX_IPD_1st_NEXT_PTR_BACK CVMX_IPD_1st_NEXT_PTR_BACK_FUNC()
+static inline uint64_t CVMX_IPD_1st_NEXT_PTR_BACK_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000150ull);
+}
+
+#define CVMX_IPD_2nd_NEXT_PTR_BACK CVMX_IPD_2nd_NEXT_PTR_BACK_FUNC()
+static inline uint64_t CVMX_IPD_2nd_NEXT_PTR_BACK_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000158ull);
+}
+
+#define CVMX_IPD_BIST_STATUS CVMX_IPD_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_IPD_BIST_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F00000007F8ull);
+}
+
+#define CVMX_IPD_BP_PRT_RED_END CVMX_IPD_BP_PRT_RED_END_FUNC()
+static inline uint64_t CVMX_IPD_BP_PRT_RED_END_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000328ull);
+}
+
+#define CVMX_IPD_CLK_COUNT CVMX_IPD_CLK_COUNT_FUNC()
+static inline uint64_t CVMX_IPD_CLK_COUNT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000338ull);
+}
+
+#define CVMX_IPD_CTL_STATUS CVMX_IPD_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_IPD_CTL_STATUS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000018ull);
+}
+
+#define CVMX_IPD_INT_ENB CVMX_IPD_INT_ENB_FUNC()
+static inline uint64_t CVMX_IPD_INT_ENB_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000160ull);
+}
+
+#define CVMX_IPD_INT_SUM CVMX_IPD_INT_SUM_FUNC()
+static inline uint64_t CVMX_IPD_INT_SUM_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000168ull);
+}
+
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP CVMX_IPD_NOT_1ST_MBUFF_SKIP_FUNC()
+static inline uint64_t CVMX_IPD_NOT_1ST_MBUFF_SKIP_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000008ull);
+}
+
+#define CVMX_IPD_PACKET_MBUFF_SIZE CVMX_IPD_PACKET_MBUFF_SIZE_FUNC()
+static inline uint64_t CVMX_IPD_PACKET_MBUFF_SIZE_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000010ull);
+}
+
+#define CVMX_IPD_PKT_PTR_VALID CVMX_IPD_PKT_PTR_VALID_FUNC()
+static inline uint64_t CVMX_IPD_PKT_PTR_VALID_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000358ull);
+}
+
+/* Valid port numbers are sparse and model-specific (GMX/SPI/PCI port maps). */
+static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35))))))
+ cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000028ull) + (offset&63)*8;
+}
+
+static inline uint64_t CVMX_IPD_PORTX_BP_PAGE_CNT2(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_IPD_PORTX_BP_PAGE_CNT2(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000368ull) + (offset&63)*8 - 8*36;
+}
+
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 36) && (offset <= 39)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 36) && (offset <= 39))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000388ull) + (offset&63)*8 - 8*36;
+}
+
+static inline uint64_t CVMX_IPD_PORT_BP_COUNTERS_PAIRX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3) || ((offset >= 16) && (offset <= 19)) || ((offset >= 32) && (offset <= 35)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 2) || (offset == 32))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 2) || ((offset >= 32) && (offset <= 33)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 35))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3) || ((offset >= 32) && (offset <= 35))))))
+ cvmx_warn("CVMX_IPD_PORT_BP_COUNTERS_PAIRX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + (offset&63)*8;
+}
+
+static inline uint64_t CVMX_IPD_PORT_QOS_INTX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4)))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_INTX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000808ull) + (offset&7)*8;
+}
+
+static inline uint64_t CVMX_IPD_PORT_QOS_INT_ENBX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0) || (offset == 2) || (offset == 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset == 0) || (offset == 4)))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_INT_ENBX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000848ull) + (offset&7)*8;
+}
+
+static inline uint64_t CVMX_IPD_PORT_QOS_X_CNT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31) || ((offset >= 128) && (offset <= 159)) || ((offset >= 256) && (offset <= 319)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31) || ((offset >= 256) && (offset <= 319))))))
+ cvmx_warn("CVMX_IPD_PORT_QOS_X_CNT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000888ull) + (offset&511)*8;
+}
+
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000348ull);
+}
+
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL CVMX_IPD_PRC_PORT_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PRC_PORT_PTR_FIFO_CTL_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000350ull);
+}
+
+#define CVMX_IPD_PTR_COUNT CVMX_IPD_PTR_COUNT_FUNC()
+static inline uint64_t CVMX_IPD_PTR_COUNT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000320ull);
+}
+
+#define CVMX_IPD_PWP_PTR_FIFO_CTL CVMX_IPD_PWP_PTR_FIFO_CTL_FUNC()
+static inline uint64_t CVMX_IPD_PWP_PTR_FIFO_CTL_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000340ull);
+}
+
+/* Legacy fixed-index aliases kept for backward-compatible callers. */
+#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0)
+#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1)
+#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2)
+#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3)
+#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4)
+#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5)
+#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6)
+#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7)
+static inline uint64_t CVMX_IPD_QOSX_RED_MARKS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_IPD_QOSX_RED_MARKS(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000178ull) + (offset&7)*8;
+}
+
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT CVMX_IPD_QUE0_FREE_PAGE_CNT_FUNC()
+static inline uint64_t CVMX_IPD_QUE0_FREE_PAGE_CNT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000330ull);
+}
+
+#define CVMX_IPD_RED_PORT_ENABLE CVMX_IPD_RED_PORT_ENABLE_FUNC()
+static inline uint64_t CVMX_IPD_RED_PORT_ENABLE_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F00000002D8ull);
+}
+
+#define CVMX_IPD_RED_PORT_ENABLE2 CVMX_IPD_RED_PORT_ENABLE2_FUNC()
+static inline uint64_t CVMX_IPD_RED_PORT_ENABLE2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_IPD_RED_PORT_ENABLE2 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F00000003A8ull);
+}
+
+#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0)
+#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1)
+#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2)
+#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3)
+#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4)
+#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5)
+#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6)
+#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7)
+static inline uint64_t CVMX_IPD_RED_QUEX_PARAM(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_IPD_RED_QUEX_PARAM(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + (offset&7)*8;
+}
+
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT CVMX_IPD_SUB_PORT_BP_PAGE_CNT_FUNC()
+static inline uint64_t CVMX_IPD_SUB_PORT_BP_PAGE_CNT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000148ull);
+}
+
+#define CVMX_IPD_SUB_PORT_FCS CVMX_IPD_SUB_PORT_FCS_FUNC()
+static inline uint64_t CVMX_IPD_SUB_PORT_FCS_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000170ull);
+}
+
+#define CVMX_IPD_SUB_PORT_QOS_CNT CVMX_IPD_SUB_PORT_QOS_CNT_FUNC()
+static inline uint64_t CVMX_IPD_SUB_PORT_QOS_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_IPD_SUB_PORT_QOS_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00014F0000000800ull);
+}
+
+#define CVMX_IPD_WQE_FPA_QUEUE CVMX_IPD_WQE_FPA_QUEUE_FUNC()
+static inline uint64_t CVMX_IPD_WQE_FPA_QUEUE_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000020ull);
+}
+
+#define CVMX_IPD_WQE_PTR_VALID CVMX_IPD_WQE_PTR_VALID_FUNC()
+static inline uint64_t CVMX_IPD_WQE_PTR_VALID_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00014F0000000360ull);
+}
+
+/* --- KEY memory CSR address helpers (auto-generated) -------------------- */
+/* KEY unit registers exist only on CN38XX/CN56XX/CN58XX; on other models   */
+/* the helpers warn (when address checking is enabled) but still return     */
+/* the address.  Do not hand-edit: regenerated.                             */
+#define CVMX_KEY_BIST_REG CVMX_KEY_BIST_REG_FUNC()
+static inline uint64_t CVMX_KEY_BIST_REG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_KEY_BIST_REG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180020000018ull);
+}
+
+#define CVMX_KEY_CTL_STATUS CVMX_KEY_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_KEY_CTL_STATUS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_KEY_CTL_STATUS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180020000010ull);
+}
+
+#define CVMX_KEY_INT_ENB CVMX_KEY_INT_ENB_FUNC()
+static inline uint64_t CVMX_KEY_INT_ENB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_KEY_INT_ENB not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180020000008ull);
+}
+
+#define CVMX_KEY_INT_SUM CVMX_KEY_INT_SUM_FUNC()
+static inline uint64_t CVMX_KEY_INT_SUM_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_KEY_INT_SUM not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180020000000ull);
+}
+
+/* --- L2C (Level-2 Cache controller) CSR address helpers (auto-generated) */
+/* Fixed single-instance registers; GRPWRR/INT_* exist only on             */
+/* CN52XX/CN56XX.  Do not hand-edit: regenerated from the register         */
+/* database.                                                               */
+#define CVMX_L2C_BST0 CVMX_L2C_BST0_FUNC()
+static inline uint64_t CVMX_L2C_BST0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007F8ull);
+}
+
+#define CVMX_L2C_BST1 CVMX_L2C_BST1_FUNC()
+static inline uint64_t CVMX_L2C_BST1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007F0ull);
+}
+
+#define CVMX_L2C_BST2 CVMX_L2C_BST2_FUNC()
+static inline uint64_t CVMX_L2C_BST2_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007E8ull);
+}
+
+#define CVMX_L2C_CFG CVMX_L2C_CFG_FUNC()
+static inline uint64_t CVMX_L2C_CFG_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000000ull);
+}
+
+#define CVMX_L2C_DBG CVMX_L2C_DBG_FUNC()
+static inline uint64_t CVMX_L2C_DBG_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000030ull);
+}
+
+#define CVMX_L2C_DUT CVMX_L2C_DUT_FUNC()
+static inline uint64_t CVMX_L2C_DUT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000050ull);
+}
+
+#define CVMX_L2C_GRPWRR0 CVMX_L2C_GRPWRR0_FUNC()
+static inline uint64_t CVMX_L2C_GRPWRR0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_GRPWRR0 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000C8ull);
+}
+
+#define CVMX_L2C_GRPWRR1 CVMX_L2C_GRPWRR1_FUNC()
+static inline uint64_t CVMX_L2C_GRPWRR1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_GRPWRR1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000D0ull);
+}
+
+#define CVMX_L2C_INT_EN CVMX_L2C_INT_EN_FUNC()
+static inline uint64_t CVMX_L2C_INT_EN_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_INT_EN not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180080000100ull);
+}
+
+#define CVMX_L2C_INT_STAT CVMX_L2C_INT_STAT_FUNC()
+static inline uint64_t CVMX_L2C_INT_STAT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_INT_STAT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000F8ull);
+}
+
+#define CVMX_L2C_LCKBASE CVMX_L2C_LCKBASE_FUNC()
+static inline uint64_t CVMX_L2C_LCKBASE_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000058ull);
+}
+
+#define CVMX_L2C_LCKOFF CVMX_L2C_LCKOFF_FUNC()
+static inline uint64_t CVMX_L2C_LCKOFF_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000060ull);
+}
+
+#define CVMX_L2C_LFB0 CVMX_L2C_LFB0_FUNC()
+static inline uint64_t CVMX_L2C_LFB0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000038ull);
+}
+
+#define CVMX_L2C_LFB1 CVMX_L2C_LFB1_FUNC()
+static inline uint64_t CVMX_L2C_LFB1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000040ull);
+}
+
+#define CVMX_L2C_LFB2 CVMX_L2C_LFB2_FUNC()
+static inline uint64_t CVMX_L2C_LFB2_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000048ull);
+}
+
+#define CVMX_L2C_LFB3 CVMX_L2C_LFB3_FUNC()
+static inline uint64_t CVMX_L2C_LFB3_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800000B8ull);
+}
+
+#define CVMX_L2C_OOB CVMX_L2C_OOB_FUNC()
+static inline uint64_t CVMX_L2C_OOB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000D8ull);
+}
+
+#define CVMX_L2C_OOB1 CVMX_L2C_OOB1_FUNC()
+static inline uint64_t CVMX_L2C_OOB1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000E0ull);
+}
+
+#define CVMX_L2C_OOB2 CVMX_L2C_OOB2_FUNC()
+static inline uint64_t CVMX_L2C_OOB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB2 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000E8ull);
+}
+
+#define CVMX_L2C_OOB3 CVMX_L2C_OOB3_FUNC()
+static inline uint64_t CVMX_L2C_OOB3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_OOB3 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000F0ull);
+}
+
/*
 * L2C performance counters: PFC0..PFC3 are aliases onto the indexed
 * accessor CVMX_L2C_PFCX(); the macros expand at their use sites, so the
 * definition order relative to CVMX_L2C_PFCX below is harmless.
 */
#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
#define CVMX_L2C_PFCTL CVMX_L2C_PFCTL_FUNC()
static inline uint64_t CVMX_L2C_PFCTL_FUNC(void)
{
	return CVMX_ADD_IO_SEG(0x0001180080000090ull);
}

static inline uint64_t CVMX_L2C_PFCX(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
		cvmx_warn("CVMX_L2C_PFCX(%lu) is invalid on this chip\n", offset);
#endif
	/* offset&3 silently wraps an out-of-range index onto a valid counter;
	 * the only out-of-range diagnostic is the optional warning above. */
	return CVMX_ADD_IO_SEG(0x0001180080000098ull) + (offset&3)*8;
}
+
+#define CVMX_L2C_PPGRP CVMX_L2C_PPGRP_FUNC()
+static inline uint64_t CVMX_L2C_PPGRP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_L2C_PPGRP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800800000C0ull);
+}
+
+#define CVMX_L2C_SPAR0 CVMX_L2C_SPAR0_FUNC()
+static inline uint64_t CVMX_L2C_SPAR0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000068ull);
+}
+
+#define CVMX_L2C_SPAR1 CVMX_L2C_SPAR1_FUNC()
+static inline uint64_t CVMX_L2C_SPAR1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR1 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180080000070ull);
+}
+
+#define CVMX_L2C_SPAR2 CVMX_L2C_SPAR2_FUNC()
+static inline uint64_t CVMX_L2C_SPAR2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR2 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180080000078ull);
+}
+
+#define CVMX_L2C_SPAR3 CVMX_L2C_SPAR3_FUNC()
+static inline uint64_t CVMX_L2C_SPAR3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_L2C_SPAR3 not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180080000080ull);
+}
+
+#define CVMX_L2C_SPAR4 CVMX_L2C_SPAR4_FUNC()
+static inline uint64_t CVMX_L2C_SPAR4_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000088ull);
+}
+
+#define CVMX_L2D_BST0 CVMX_L2D_BST0_FUNC()
+static inline uint64_t CVMX_L2D_BST0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000780ull);
+}
+
+#define CVMX_L2D_BST1 CVMX_L2D_BST1_FUNC()
+static inline uint64_t CVMX_L2D_BST1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000788ull);
+}
+
+#define CVMX_L2D_BST2 CVMX_L2D_BST2_FUNC()
+static inline uint64_t CVMX_L2D_BST2_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000790ull);
+}
+
+#define CVMX_L2D_BST3 CVMX_L2D_BST3_FUNC()
+static inline uint64_t CVMX_L2D_BST3_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000798ull);
+}
+
+#define CVMX_L2D_ERR CVMX_L2D_ERR_FUNC()
+static inline uint64_t CVMX_L2D_ERR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000010ull);
+}
+
+#define CVMX_L2D_FADR CVMX_L2D_FADR_FUNC()
+static inline uint64_t CVMX_L2D_FADR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000018ull);
+}
+
+#define CVMX_L2D_FSYN0 CVMX_L2D_FSYN0_FUNC()
+static inline uint64_t CVMX_L2D_FSYN0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000020ull);
+}
+
+#define CVMX_L2D_FSYN1 CVMX_L2D_FSYN1_FUNC()
+static inline uint64_t CVMX_L2D_FSYN1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000028ull);
+}
+
+#define CVMX_L2D_FUS0 CVMX_L2D_FUS0_FUNC()
+static inline uint64_t CVMX_L2D_FUS0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007A0ull);
+}
+
+#define CVMX_L2D_FUS1 CVMX_L2D_FUS1_FUNC()
+static inline uint64_t CVMX_L2D_FUS1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007A8ull);
+}
+
+#define CVMX_L2D_FUS2 CVMX_L2D_FUS2_FUNC()
+static inline uint64_t CVMX_L2D_FUS2_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007B0ull);
+}
+
+#define CVMX_L2D_FUS3 CVMX_L2D_FUS3_FUNC()
+static inline uint64_t CVMX_L2D_FUS3_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800800007B8ull);
+}
+
+#define CVMX_L2T_ERR CVMX_L2T_ERR_FUNC()
+static inline uint64_t CVMX_L2T_ERR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180080000008ull);
+}
+
+#define CVMX_LED_BLINK CVMX_LED_BLINK_FUNC()
+static inline uint64_t CVMX_LED_BLINK_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_BLINK not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A48ull);
+}
+
+#define CVMX_LED_CLK_PHASE CVMX_LED_CLK_PHASE_FUNC()
+static inline uint64_t CVMX_LED_CLK_PHASE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_CLK_PHASE not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A08ull);
+}
+
+#define CVMX_LED_CYLON CVMX_LED_CYLON_FUNC()
+static inline uint64_t CVMX_LED_CYLON_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_CYLON not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001AF8ull);
+}
+
+#define CVMX_LED_DBG CVMX_LED_DBG_FUNC()
+static inline uint64_t CVMX_LED_DBG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_DBG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A18ull);
+}
+
+#define CVMX_LED_EN CVMX_LED_EN_FUNC()
+static inline uint64_t CVMX_LED_EN_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_EN not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A00ull);
+}
+
+#define CVMX_LED_POLARITY CVMX_LED_POLARITY_FUNC()
+static inline uint64_t CVMX_LED_POLARITY_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_POLARITY not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A50ull);
+}
+
+#define CVMX_LED_PRT CVMX_LED_PRT_FUNC()
+static inline uint64_t CVMX_LED_PRT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_PRT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A10ull);
+}
+
+#define CVMX_LED_PRT_FMT CVMX_LED_PRT_FMT_FUNC()
+static inline uint64_t CVMX_LED_PRT_FMT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ cvmx_warn("CVMX_LED_PRT_FMT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A30ull);
+}
+
+static inline uint64_t CVMX_LED_PRT_STATUSX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_LED_PRT_STATUSX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A80ull) + (offset&7)*8;
+}
+
+static inline uint64_t CVMX_LED_UDD_CNTX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_CNTX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A20ull) + (offset&1)*8;
+}
+
+static inline uint64_t CVMX_LED_UDD_DATX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_LED_UDD_DATX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001A38ull) + (offset&1)*8;
+}
+
/*
 * LED user-defined-data atomic CLR/SET register pairs.  Unlike the other
 * LED CSRs in this header (stride 8), these use a stride of 16: the SET
 * and CLR registers for each index interleave — SET0=0x1AC0, CLR0=0x1AC8,
 * SET1=0x1AD0, CLR1=0x1AD8 — as the base+stride arithmetic below shows.
 */
static inline uint64_t CVMX_LED_UDD_DAT_CLRX(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_LED_UDD_DAT_CLRX(%lu) is invalid on this chip\n", offset);
#endif
	return CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + (offset&1)*16;
}

static inline uint64_t CVMX_LED_UDD_DAT_SETX(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_LED_UDD_DAT_SETX(%lu) is invalid on this chip\n", offset);
#endif
	return CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + (offset&1)*16;
}
+
+static inline uint64_t CVMX_LMCX_BIST_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_BIST_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000F0ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_BIST_RESULT(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_BIST_RESULT(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000F8ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_COMP_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_COMP_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000028ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000010ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_CTL1(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_CTL1(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000090ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DCLK_CNT_HI(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CNT_HI(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000070ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DCLK_CNT_LO(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CNT_LO(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000068ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DCLK_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
+ cvmx_warn("CVMX_LMCX_DCLK_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000B8ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DDR2_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DDR2_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000018ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DELAY_CFG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DELAY_CFG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000088ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DLL_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DLL_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000C0ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_DUAL_MEMCFG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_ECC_SYND(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_FADR(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_IFB_CNT_HI(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_IFB_CNT_HI(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000050ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_IFB_CNT_LO(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_IFB_CNT_LO(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000048ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_MEM_CFG0(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MEM_CFG0(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000000ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_MEM_CFG1(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_MEM_CFG1(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000008ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_NXM(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_OPS_CNT_HI(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_OPS_CNT_HI(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000060ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_OPS_CNT_LO(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_OPS_CNT_LO(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000058ull) + (block_id&1)*0x60000000ull;
+}
+
static inline uint64_t CVMX_LMCX_PLL_BWCTL(unsigned long block_id)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_PLL_BWCTL(%lu) is invalid on this chip\n", block_id);
#endif
	/* NOTE(review): (block_id&0) is always zero, so the multiplier term is
	 * dead — presumably intentional generator output because every model
	 * gated above has exactly one LMC (block_id must be 0); note the stride
	 * constant also differs (0x8000000 vs 0x60000000 elsewhere).  Confirm
	 * against the CSR database before "fixing". */
	return CVMX_ADD_IO_SEG(0x0001180088000040ull) + (block_id&0)*0x8000000ull;
}
+
+static inline uint64_t CVMX_LMCX_PLL_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PLL_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000A8ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_PLL_STATUS(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_PLL_STATUS(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000B0ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_READ_LEVEL_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_READ_LEVEL_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000140ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_READ_LEVEL_DBG(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_READ_LEVEL_DBG(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000148ull) + (block_id&1)*0x60000000ull;
+}
+
/*
 * Two-index accessor: per-rank read-leveling CSR within a given LMC
 * controller.  Address = base + (rank + controller*0xC000000)*8, i.e. the
 * rank index selects consecutive 8-byte registers and the block term works
 * out to the same 0x60000000 per-controller stride used by the other LMCX
 * accessors (0xC000000 * 8 == 0x60000000).
 */
static inline uint64_t CVMX_LMCX_READ_LEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_LMCX_READ_LEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
#endif
	return CVMX_ADD_IO_SEG(0x0001180088000100ull) + ((offset&3) + (block_id&1)*0xC000000ull)*8;
}
+
+static inline uint64_t CVMX_LMCX_RODT_COMP_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RODT_COMP_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800880000A0ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_RODT_CTL(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_RODT_CTL(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000078ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_WODT_CTL0(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WODT_CTL0(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000030ull) + (block_id&1)*0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_WODT_CTL1(unsigned long block_id)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0)))))
+ cvmx_warn("CVMX_LMCX_WODT_CTL1(%lu) is invalid on this chip\n", block_id);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180088000080ull) + (block_id&1)*0x60000000ull;
+}
+
+#define CVMX_MIO_BOOT_BIST_STAT CVMX_MIO_BOOT_BIST_STAT_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_BIST_STAT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800000000F8ull);
+}
+
+#define CVMX_MIO_BOOT_COMP CVMX_MIO_BOOT_COMP_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_COMP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_MIO_BOOT_COMP not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800000000B8ull);
+}
+
static inline uint64_t CVMX_MIO_BOOT_DMA_CFGX(unsigned long offset)
{
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
	/* Valid range is model-dependent: <=2 on CN56XX, <=1 on CN52XX. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_MIO_BOOT_DMA_CFGX(%lu) is invalid on this chip\n", offset);
#endif
	/* The &3 mask admits offset 3, which no model above accepts; with
	 * address checking disabled that address is computed without complaint
	 * (warn-only guard, consistent with the rest of this generated file). */
	return CVMX_ADD_IO_SEG(0x0001180000000100ull) + (offset&3)*8;
}
+
+static inline uint64_t CVMX_MIO_BOOT_DMA_INTX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_INTX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000138ull) + (offset&3)*8;
+}
+
+static inline uint64_t CVMX_MIO_BOOT_DMA_INT_ENX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_INT_ENX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000150ull) + (offset&3)*8;
+}
+
+static inline uint64_t CVMX_MIO_BOOT_DMA_TIMX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 2))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_DMA_TIMX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000120ull) + (offset&3)*8;
+}
+
+#define CVMX_MIO_BOOT_ERR CVMX_MIO_BOOT_ERR_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_ERR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800000000A0ull);
+}
+
+#define CVMX_MIO_BOOT_INT CVMX_MIO_BOOT_INT_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_INT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800000000A8ull);
+}
+
+#define CVMX_MIO_BOOT_LOC_ADR CVMX_MIO_BOOT_LOC_ADR_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_LOC_ADR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000000090ull);
+}
+
+static inline uint64_t CVMX_MIO_BOOT_LOC_CFGX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_BOOT_LOC_CFGX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000080ull) + (offset&1)*8;
+}
+
+#define CVMX_MIO_BOOT_LOC_DAT CVMX_MIO_BOOT_LOC_DAT_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_LOC_DAT_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000000098ull);
+}
+
+#define CVMX_MIO_BOOT_PIN_DEFS CVMX_MIO_BOOT_PIN_DEFS_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_PIN_DEFS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_MIO_BOOT_PIN_DEFS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x00011800000000C0ull);
+}
+
+static inline uint64_t CVMX_MIO_BOOT_REG_CFGX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_MIO_BOOT_REG_CFGX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000000ull) + (offset&7)*8;
+}
+
+static inline uint64_t CVMX_MIO_BOOT_REG_TIMX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7)))))
+ cvmx_warn("CVMX_MIO_BOOT_REG_TIMX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000040ull) + (offset&7)*8;
+}
+
+#define CVMX_MIO_BOOT_THR CVMX_MIO_BOOT_THR_FUNC()
+static inline uint64_t CVMX_MIO_BOOT_THR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x00011800000000B0ull);
+}
+
+/*
+ * MIO fuse (MIO_FUS) CSR address accessors (auto-generated).  Several
+ * registers are gated with !OCTEON_IS_MODEL(OCTEON_CN3XXX), i.e. they exist
+ * on everything except the CN3xxx generation; others are present on all
+ * models and need no check.
+ */
+static inline uint64_t CVMX_MIO_FUS_BNK_DATX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3)))))
+ cvmx_warn("CVMX_MIO_FUS_BNK_DATX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001520ull) + (offset&3)*8;
+}
+
+#define CVMX_MIO_FUS_DAT0 CVMX_MIO_FUS_DAT0_FUNC()
+static inline uint64_t CVMX_MIO_FUS_DAT0_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001400ull);
+}
+
+#define CVMX_MIO_FUS_DAT1 CVMX_MIO_FUS_DAT1_FUNC()
+static inline uint64_t CVMX_MIO_FUS_DAT1_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001408ull);
+}
+
+#define CVMX_MIO_FUS_DAT2 CVMX_MIO_FUS_DAT2_FUNC()
+static inline uint64_t CVMX_MIO_FUS_DAT2_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001410ull);
+}
+
+#define CVMX_MIO_FUS_DAT3 CVMX_MIO_FUS_DAT3_FUNC()
+static inline uint64_t CVMX_MIO_FUS_DAT3_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001418ull);
+}
+
+#define CVMX_MIO_FUS_EMA CVMX_MIO_FUS_EMA_FUNC()
+static inline uint64_t CVMX_MIO_FUS_EMA_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(!OCTEON_IS_MODEL(OCTEON_CN3XXX)))
+ cvmx_warn("CVMX_MIO_FUS_EMA not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001550ull);
+}
+
+#define CVMX_MIO_FUS_PDF CVMX_MIO_FUS_PDF_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PDF_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(!OCTEON_IS_MODEL(OCTEON_CN3XXX)))
+ cvmx_warn("CVMX_MIO_FUS_PDF not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001420ull);
+}
+
+#define CVMX_MIO_FUS_PLL CVMX_MIO_FUS_PLL_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PLL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(!OCTEON_IS_MODEL(OCTEON_CN3XXX)))
+ cvmx_warn("CVMX_MIO_FUS_PLL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001580ull);
+}
+
+#define CVMX_MIO_FUS_PROG CVMX_MIO_FUS_PROG_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PROG_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001510ull);
+}
+
+#define CVMX_MIO_FUS_PROG_TIMES CVMX_MIO_FUS_PROG_TIMES_FUNC()
+static inline uint64_t CVMX_MIO_FUS_PROG_TIMES_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(!OCTEON_IS_MODEL(OCTEON_CN3XXX)))
+ cvmx_warn("CVMX_MIO_FUS_PROG_TIMES not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001518ull);
+}
+
+#define CVMX_MIO_FUS_RCMD CVMX_MIO_FUS_RCMD_FUNC()
+static inline uint64_t CVMX_MIO_FUS_RCMD_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001500ull);
+}
+
+#define CVMX_MIO_FUS_SPR_REPAIR_RES CVMX_MIO_FUS_SPR_REPAIR_RES_FUNC()
+static inline uint64_t CVMX_MIO_FUS_SPR_REPAIR_RES_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001548ull);
+}
+
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM CVMX_MIO_FUS_SPR_REPAIR_SUM_FUNC()
+static inline uint64_t CVMX_MIO_FUS_SPR_REPAIR_SUM_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001540ull);
+}
+
+/* Fuse unlock: present only on the CN30XX/CN31XX parts. */
+#define CVMX_MIO_FUS_UNLOCK CVMX_MIO_FUS_UNLOCK_FUNC()
+static inline uint64_t CVMX_MIO_FUS_UNLOCK_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_FUS_UNLOCK not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001578ull);
+}
+
+#define CVMX_MIO_FUS_WADR CVMX_MIO_FUS_WADR_FUNC()
+static inline uint64_t CVMX_MIO_FUS_WADR_FUNC(void)
+{
+ return CVMX_ADD_IO_SEG(0x0001180000001508ull);
+}
+
+/*
+ * MIO NAND-flash (NDF) DMA CSR address accessors (auto-generated).
+ * All three registers exist only on CN52XX; the address-checking build
+ * warns on any other model.
+ */
+#define CVMX_MIO_NDF_DMA_CFG CVMX_MIO_NDF_DMA_CFG_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_CFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_CFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000168ull);
+}
+
+#define CVMX_MIO_NDF_DMA_INT CVMX_MIO_NDF_DMA_INT_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_INT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_INT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000170ull);
+}
+
+#define CVMX_MIO_NDF_DMA_INT_EN CVMX_MIO_NDF_DMA_INT_EN_FUNC()
+static inline uint64_t CVMX_MIO_NDF_DMA_INT_EN_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_NDF_DMA_INT_EN not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000178ull);
+}
+
+/*
+ * MIO PLL CSR address accessors (auto-generated).  Both registers exist
+ * only on CN30XX/CN31XX parts.
+ */
+#define CVMX_MIO_PLL_CTL CVMX_MIO_PLL_CTL_FUNC()
+static inline uint64_t CVMX_MIO_PLL_CTL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_PLL_CTL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001448ull);
+}
+
+#define CVMX_MIO_PLL_SETTING CVMX_MIO_PLL_SETTING_FUNC()
+static inline uint64_t CVMX_MIO_PLL_SETTING_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)))
+ cvmx_warn("CVMX_MIO_PLL_SETTING not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001440ull);
+}
+
+/*
+ * MIO TWSI (two-wire serial / I2C) CSR address accessors (auto-generated).
+ * CN52XX/CN56XX have two TWSI buses (offset <= 1); every other listed model
+ * has one (offset == 0).  Buses are spaced 512 bytes apart, and the offset
+ * is masked with &1 to clamp out-of-range indices.
+ */
+static inline uint64_t CVMX_MIO_TWSX_INT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_INT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001010ull) + (offset&1)*512;
+}
+
+static inline uint64_t CVMX_MIO_TWSX_SW_TWSI(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_SW_TWSI(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001000ull) + (offset&1)*512;
+}
+
+static inline uint64_t CVMX_MIO_TWSX_SW_TWSI_EXT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_SW_TWSI_EXT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001018ull) + (offset&1)*512;
+}
+
+static inline uint64_t CVMX_MIO_TWSX_TWSI_SW(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_TWSX_TWSI_SW(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000001008ull) + (offset&1)*512;
+}
+
+/*
+ * MIO UART2 CSR address accessors (auto-generated).  The third UART exists
+ * only on CN52XX; every accessor below warns on other models when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is set.  Register names (DLH, DLL, FCR,
+ * IER, IIR, LCR, LSR, MCR, MSR, RBR, THR, SCR, ...) presumably follow the
+ * 16550-style UART register set -- confirm against the hardware reference
+ * manual before relying on field semantics.
+ */
+#define CVMX_MIO_UART2_DLH CVMX_MIO_UART2_DLH_FUNC()
+static inline uint64_t CVMX_MIO_UART2_DLH_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_DLH not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000488ull);
+}
+
+#define CVMX_MIO_UART2_DLL CVMX_MIO_UART2_DLL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_DLL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_DLL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000480ull);
+}
+
+#define CVMX_MIO_UART2_FAR CVMX_MIO_UART2_FAR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_FAR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_FAR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000520ull);
+}
+
+#define CVMX_MIO_UART2_FCR CVMX_MIO_UART2_FCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_FCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_FCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000450ull);
+}
+
+#define CVMX_MIO_UART2_HTX CVMX_MIO_UART2_HTX_FUNC()
+static inline uint64_t CVMX_MIO_UART2_HTX_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_HTX not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000708ull);
+}
+
+#define CVMX_MIO_UART2_IER CVMX_MIO_UART2_IER_FUNC()
+static inline uint64_t CVMX_MIO_UART2_IER_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_IER not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000408ull);
+}
+
+#define CVMX_MIO_UART2_IIR CVMX_MIO_UART2_IIR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_IIR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_IIR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000410ull);
+}
+
+#define CVMX_MIO_UART2_LCR CVMX_MIO_UART2_LCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_LCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_LCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000418ull);
+}
+
+#define CVMX_MIO_UART2_LSR CVMX_MIO_UART2_LSR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_LSR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_LSR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000428ull);
+}
+
+#define CVMX_MIO_UART2_MCR CVMX_MIO_UART2_MCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_MCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_MCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000420ull);
+}
+
+#define CVMX_MIO_UART2_MSR CVMX_MIO_UART2_MSR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_MSR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_MSR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000430ull);
+}
+
+#define CVMX_MIO_UART2_RBR CVMX_MIO_UART2_RBR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RBR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RBR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000400ull);
+}
+
+#define CVMX_MIO_UART2_RFL CVMX_MIO_UART2_RFL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RFL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RFL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000608ull);
+}
+
+#define CVMX_MIO_UART2_RFW CVMX_MIO_UART2_RFW_FUNC()
+static inline uint64_t CVMX_MIO_UART2_RFW_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_RFW not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000530ull);
+}
+
+#define CVMX_MIO_UART2_SBCR CVMX_MIO_UART2_SBCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SBCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SBCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000620ull);
+}
+
+#define CVMX_MIO_UART2_SCR CVMX_MIO_UART2_SCR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SCR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SCR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000438ull);
+}
+
+#define CVMX_MIO_UART2_SFE CVMX_MIO_UART2_SFE_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SFE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SFE not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000630ull);
+}
+
+#define CVMX_MIO_UART2_SRR CVMX_MIO_UART2_SRR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000610ull);
+}
+
+#define CVMX_MIO_UART2_SRT CVMX_MIO_UART2_SRT_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000638ull);
+}
+
+#define CVMX_MIO_UART2_SRTS CVMX_MIO_UART2_SRTS_FUNC()
+static inline uint64_t CVMX_MIO_UART2_SRTS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_SRTS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000618ull);
+}
+
+#define CVMX_MIO_UART2_STT CVMX_MIO_UART2_STT_FUNC()
+static inline uint64_t CVMX_MIO_UART2_STT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_STT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000700ull);
+}
+
+#define CVMX_MIO_UART2_TFL CVMX_MIO_UART2_TFL_FUNC()
+static inline uint64_t CVMX_MIO_UART2_TFL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_TFL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000600ull);
+}
+
+#define CVMX_MIO_UART2_TFR CVMX_MIO_UART2_TFR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_TFR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_TFR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000528ull);
+}
+
+#define CVMX_MIO_UART2_THR CVMX_MIO_UART2_THR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_THR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_THR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000440ull);
+}
+
+#define CVMX_MIO_UART2_USR CVMX_MIO_UART2_USR_FUNC()
+static inline uint64_t CVMX_MIO_UART2_USR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_MIO_UART2_USR not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000538ull);
+}
+
+/*
+ * MIO UART0/UART1 CSR address accessors (auto-generated).  All listed
+ * models have two UARTs (offset <= 1), spaced 1024 bytes apart; the offset
+ * is masked with &1 so an out-of-range index is clamped to a valid address.
+ * Under CVMX_ENABLE_CSR_ADDRESS_CHECKING an invalid offset/model pair only
+ * warns -- the (clamped) address is still returned.
+ */
+static inline uint64_t CVMX_MIO_UARTX_DLH(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_DLH(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000888ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_DLL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_DLL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000880ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_FAR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_FAR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000920ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_FCR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_FCR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000850ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_HTX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_HTX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000B08ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_IER(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_IER(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000808ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_IIR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_IIR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000810ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_LCR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_LCR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000818ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_LSR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_LSR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000828ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_MCR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_MCR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000820ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_MSR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_MSR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000830ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_RBR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RBR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000800ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_RFL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RFL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A08ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_RFW(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_RFW(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000930ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SBCR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SBCR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A20ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SCR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SCR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000838ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SFE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SFE(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A30ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SRR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A10ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SRT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A38ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_SRTS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_SRTS(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A18ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_STT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_STT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000B00ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_TFL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_TFL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000A00ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_TFR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_TFR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000928ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_THR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_THR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000840ull) + (offset&1)*1024;
+}
+
+static inline uint64_t CVMX_MIO_UARTX_USR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 1))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIO_UARTX_USR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001180000000938ull) + (offset&1)*1024;
+}
+
+/*
+ * MIX (management-interface) CSR address helpers (auto-generated).
+ * 'offset' selects the MIX instance: only index 0 exists on CN56XX,
+ * while CN52XX has indices 0-1.  The index is masked with &1 (so an
+ * invalid index wraps) and instances are 2048 bytes apart.  Range
+ * violations are reported through cvmx_warn() only when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is compiled in.
+ */
+static inline uint64_t CVMX_MIXX_BIST(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_BIST(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100078ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_CTL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_CTL(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100020ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_INTENA(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_INTENA(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100050ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_IRCNT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_IRCNT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100030ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_IRHWM(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_IRHWM(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100028ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_IRING1(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_IRING1(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100010ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_IRING2(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_IRING2(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100018ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_ISR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_ISR(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100048ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_ORCNT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_ORCNT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100040ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_ORHWM(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_ORHWM(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100038ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_ORING1(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_ORING1(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100000ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_ORING2(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_ORING2(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100008ull) + (offset&1)*2048;
+}
+
+static inline uint64_t CVMX_MIXX_REMCNT(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1)))))
+ cvmx_warn("CVMX_MIXX_REMCNT(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000100058ull) + (offset&1)*2048;
+}
+
+/*
+ * MPI (SPI master) CSR address helpers (auto-generated); the MPI block
+ * exists only on CN30XX/CN31XX/CN50XX.  Fixed registers are wrapped in
+ * a macro that calls a _FUNC() so the model check runs even for the
+ * no-argument case.  NOTE(review): CVMX_MPI_DATX warns for offset > 8
+ * yet masks with &15, so indices 9-15 produce an address past the last
+ * documented data register without wrapping — presumably intentional
+ * generator padding to the next power of two; confirm against the HRM.
+ */
+#define CVMX_MPI_CFG CVMX_MPI_CFG_FUNC()
+static inline uint64_t CVMX_MPI_CFG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_MPI_CFG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000001000ull);
+}
+
+static inline uint64_t CVMX_MPI_DATX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 8))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 8)))))
+ cvmx_warn("CVMX_MPI_DATX(%lu) is invalid on this chip\n", offset);
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000001080ull) + (offset&15)*8;
+}
+
+#define CVMX_MPI_STS CVMX_MPI_STS_FUNC()
+static inline uint64_t CVMX_MPI_STS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_MPI_STS not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000001008ull);
+}
+
+#define CVMX_MPI_TX CVMX_MPI_TX_FUNC()
+static inline uint64_t CVMX_MPI_TX_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
+ cvmx_warn("CVMX_MPI_TX not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070000001010ull);
+}
+
+/*
+ * NDF (NAND flash controller) CSR address helpers (auto-generated).
+ * All NDF registers are fixed-address and exist only on CN52XX; the
+ * model check fires cvmx_warn() on any other chip when
+ * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled.
+ */
+#define CVMX_NDF_BT_PG_INFO CVMX_NDF_BT_PG_INFO_FUNC()
+static inline uint64_t CVMX_NDF_BT_PG_INFO_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_BT_PG_INFO not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000018ull);
+}
+
+#define CVMX_NDF_CMD CVMX_NDF_CMD_FUNC()
+static inline uint64_t CVMX_NDF_CMD_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_CMD not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000000ull);
+}
+
+#define CVMX_NDF_DRBELL CVMX_NDF_DRBELL_FUNC()
+static inline uint64_t CVMX_NDF_DRBELL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_DRBELL not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000030ull);
+}
+
+#define CVMX_NDF_ECC_CNT CVMX_NDF_ECC_CNT_FUNC()
+static inline uint64_t CVMX_NDF_ECC_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_ECC_CNT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000010ull);
+}
+
+#define CVMX_NDF_INT CVMX_NDF_INT_FUNC()
+static inline uint64_t CVMX_NDF_INT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_INT not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000020ull);
+}
+
+#define CVMX_NDF_INT_EN CVMX_NDF_INT_EN_FUNC()
+static inline uint64_t CVMX_NDF_INT_EN_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_INT_EN not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000028ull);
+}
+
+#define CVMX_NDF_MISC CVMX_NDF_MISC_FUNC()
+static inline uint64_t CVMX_NDF_MISC_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_MISC not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000008ull);
+}
+
+#define CVMX_NDF_ST_REG CVMX_NDF_ST_REG_FUNC()
+static inline uint64_t CVMX_NDF_ST_REG_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NDF_ST_REG not supported on this chip\n");
+#endif
+ return CVMX_ADD_IO_SEG(0x0001070001000038ull);
+}
+
+/*
+ * NPEI (PCIe interface) CSR address helpers (auto-generated).
+ * Unlike the MIO/MIX/NDF helpers above, these return a bare register
+ * offset with no CVMX_ADD_IO_SEG() — presumably these CSRs are reached
+ * through the NPEI window rather than the normal I/O address space;
+ * verify against the access functions that consume them.  NPEI exists
+ * only on CN52XX/CN56XX.  Indexed DMA registers accept offsets 0-4 but
+ * mask with &7 (generator padding; out-of-range indices wrap, with a
+ * cvmx_warn() only under CVMX_ENABLE_CSR_ADDRESS_CHECKING).
+ */
+static inline uint64_t CVMX_NPEI_BAR1_INDEXX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_BAR1_INDEXX(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000000000ull + (offset&31)*16;
+}
+
+#define CVMX_NPEI_BIST_STATUS CVMX_NPEI_BIST_STATUS_FUNC()
+static inline uint64_t CVMX_NPEI_BIST_STATUS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_BIST_STATUS not supported on this chip\n");
+#endif
+ return 0x0000000000000580ull;
+}
+
+#define CVMX_NPEI_BIST_STATUS2 CVMX_NPEI_BIST_STATUS2_FUNC()
+static inline uint64_t CVMX_NPEI_BIST_STATUS2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_BIST_STATUS2 not supported on this chip\n");
+#endif
+ return 0x0000000000000680ull;
+}
+
+#define CVMX_NPEI_CTL_PORT0 CVMX_NPEI_CTL_PORT0_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_PORT0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_PORT0 not supported on this chip\n");
+#endif
+ return 0x0000000000000250ull;
+}
+
+#define CVMX_NPEI_CTL_PORT1 CVMX_NPEI_CTL_PORT1_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_PORT1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_PORT1 not supported on this chip\n");
+#endif
+ return 0x0000000000000260ull;
+}
+
+#define CVMX_NPEI_CTL_STATUS CVMX_NPEI_CTL_STATUS_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_STATUS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_STATUS not supported on this chip\n");
+#endif
+ return 0x0000000000000570ull;
+}
+
+#define CVMX_NPEI_CTL_STATUS2 CVMX_NPEI_CTL_STATUS2_FUNC()
+static inline uint64_t CVMX_NPEI_CTL_STATUS2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_CTL_STATUS2 not supported on this chip\n");
+#endif
+ return 0x0000000000003C00ull;
+}
+
+#define CVMX_NPEI_DATA_OUT_CNT CVMX_NPEI_DATA_OUT_CNT_FUNC()
+static inline uint64_t CVMX_NPEI_DATA_OUT_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DATA_OUT_CNT not supported on this chip\n");
+#endif
+ return 0x00000000000005F0ull;
+}
+
+#define CVMX_NPEI_DBG_DATA CVMX_NPEI_DBG_DATA_FUNC()
+static inline uint64_t CVMX_NPEI_DBG_DATA_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DBG_DATA not supported on this chip\n");
+#endif
+ return 0x0000000000000510ull;
+}
+
+#define CVMX_NPEI_DBG_SELECT CVMX_NPEI_DBG_SELECT_FUNC()
+static inline uint64_t CVMX_NPEI_DBG_SELECT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DBG_SELECT not supported on this chip\n");
+#endif
+ return 0x0000000000000500ull;
+}
+
+#define CVMX_NPEI_DMA0_INT_LEVEL CVMX_NPEI_DMA0_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA0_INT_LEVEL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA0_INT_LEVEL not supported on this chip\n");
+#endif
+ return 0x00000000000005C0ull;
+}
+
+#define CVMX_NPEI_DMA1_INT_LEVEL CVMX_NPEI_DMA1_INT_LEVEL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA1_INT_LEVEL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA1_INT_LEVEL not supported on this chip\n");
+#endif
+ return 0x00000000000005D0ull;
+}
+
+static inline uint64_t CVMX_NPEI_DMAX_COUNTS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_COUNTS(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000000450ull + (offset&7)*16;
+}
+
+static inline uint64_t CVMX_NPEI_DMAX_DBELL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_DBELL(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x00000000000003B0ull + (offset&7)*16;
+}
+
+static inline uint64_t CVMX_NPEI_DMAX_IBUFF_SADDR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_IBUFF_SADDR(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000000400ull + (offset&7)*16;
+}
+
+static inline uint64_t CVMX_NPEI_DMAX_NADDR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 4))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 4)))))
+ cvmx_warn("CVMX_NPEI_DMAX_NADDR(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x00000000000004A0ull + (offset&7)*16;
+}
+
+#define CVMX_NPEI_DMA_CNTS CVMX_NPEI_DMA_CNTS_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_CNTS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_CNTS not supported on this chip\n");
+#endif
+ return 0x00000000000005E0ull;
+}
+
+/*
+ * NPEI DMA/interrupt CSR address helpers (auto-generated), still bare
+ * offsets (no CVMX_ADD_IO_SEG()).  The *_P1 variants carry no model
+ * check at all and some reuse addresses of other registers (e.g.
+ * DMA_STATE1_P1 returns 0x680, the same offset as BIST_STATUS2) —
+ * presumably pass-1 silicon register maps; confirm against the HRM
+ * before relying on them.  MEM_ACCESS_SUBIDX is the one indexed
+ * register whose valid range starts above zero (12-27): the generated
+ * expression masks with &31 and then subtracts the 16*12 base so that
+ * offset 12 maps to 0x340.
+ */
+#define CVMX_NPEI_DMA_CONTROL CVMX_NPEI_DMA_CONTROL_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_CONTROL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_CONTROL not supported on this chip\n");
+#endif
+ return 0x00000000000003A0ull;
+}
+
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM CVMX_NPEI_DMA_PCIE_REQ_NUM_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_PCIE_REQ_NUM_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_DMA_PCIE_REQ_NUM not supported on this chip\n");
+#endif
+ return 0x00000000000005B0ull;
+}
+
+#define CVMX_NPEI_DMA_STATE1 CVMX_NPEI_DMA_STATE1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NPEI_DMA_STATE1 not supported on this chip\n");
+#endif
+ return 0x00000000000006C0ull;
+}
+
+#define CVMX_NPEI_DMA_STATE1_P1 CVMX_NPEI_DMA_STATE1_P1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE1_P1_FUNC(void)
+{
+ return 0x0000000000000680ull;
+}
+
+#define CVMX_NPEI_DMA_STATE2 CVMX_NPEI_DMA_STATE2_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ cvmx_warn("CVMX_NPEI_DMA_STATE2 not supported on this chip\n");
+#endif
+ return 0x00000000000006D0ull;
+}
+
+#define CVMX_NPEI_DMA_STATE2_P1 CVMX_NPEI_DMA_STATE2_P1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE2_P1_FUNC(void)
+{
+ return 0x0000000000000690ull;
+}
+
+#define CVMX_NPEI_DMA_STATE3_P1 CVMX_NPEI_DMA_STATE3_P1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE3_P1_FUNC(void)
+{
+ return 0x00000000000006A0ull;
+}
+
+#define CVMX_NPEI_DMA_STATE4_P1 CVMX_NPEI_DMA_STATE4_P1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE4_P1_FUNC(void)
+{
+ return 0x00000000000006B0ull;
+}
+
+#define CVMX_NPEI_DMA_STATE5_P1 CVMX_NPEI_DMA_STATE5_P1_FUNC()
+static inline uint64_t CVMX_NPEI_DMA_STATE5_P1_FUNC(void)
+{
+ return 0x00000000000006C0ull;
+}
+
+#define CVMX_NPEI_INT_A_ENB CVMX_NPEI_INT_A_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_ENB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_ENB not supported on this chip\n");
+#endif
+ return 0x0000000000000560ull;
+}
+
+#define CVMX_NPEI_INT_A_ENB2 CVMX_NPEI_INT_A_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_ENB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_ENB2 not supported on this chip\n");
+#endif
+ return 0x0000000000003CE0ull;
+}
+
+#define CVMX_NPEI_INT_A_SUM CVMX_NPEI_INT_A_SUM_FUNC()
+static inline uint64_t CVMX_NPEI_INT_A_SUM_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_A_SUM not supported on this chip\n");
+#endif
+ return 0x0000000000000550ull;
+}
+
+#define CVMX_NPEI_INT_ENB CVMX_NPEI_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_INT_ENB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_ENB not supported on this chip\n");
+#endif
+ return 0x0000000000000540ull;
+}
+
+#define CVMX_NPEI_INT_ENB2 CVMX_NPEI_INT_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_ENB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_ENB2 not supported on this chip\n");
+#endif
+ return 0x0000000000003CD0ull;
+}
+
+#define CVMX_NPEI_INT_INFO CVMX_NPEI_INT_INFO_FUNC()
+static inline uint64_t CVMX_NPEI_INT_INFO_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_INFO not supported on this chip\n");
+#endif
+ return 0x0000000000000590ull;
+}
+
+#define CVMX_NPEI_INT_SUM CVMX_NPEI_INT_SUM_FUNC()
+static inline uint64_t CVMX_NPEI_INT_SUM_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_SUM not supported on this chip\n");
+#endif
+ return 0x0000000000000530ull;
+}
+
+#define CVMX_NPEI_INT_SUM2 CVMX_NPEI_INT_SUM2_FUNC()
+static inline uint64_t CVMX_NPEI_INT_SUM2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_INT_SUM2 not supported on this chip\n");
+#endif
+ return 0x0000000000003CC0ull;
+}
+
+#define CVMX_NPEI_LAST_WIN_RDATA0 CVMX_NPEI_LAST_WIN_RDATA0_FUNC()
+static inline uint64_t CVMX_NPEI_LAST_WIN_RDATA0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_LAST_WIN_RDATA0 not supported on this chip\n");
+#endif
+ return 0x0000000000000600ull;
+}
+
+#define CVMX_NPEI_LAST_WIN_RDATA1 CVMX_NPEI_LAST_WIN_RDATA1_FUNC()
+static inline uint64_t CVMX_NPEI_LAST_WIN_RDATA1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_LAST_WIN_RDATA1 not supported on this chip\n");
+#endif
+ return 0x0000000000000610ull;
+}
+
+#define CVMX_NPEI_MEM_ACCESS_CTL CVMX_NPEI_MEM_ACCESS_CTL_FUNC()
+static inline uint64_t CVMX_NPEI_MEM_ACCESS_CTL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MEM_ACCESS_CTL not supported on this chip\n");
+#endif
+ return 0x00000000000004F0ull;
+}
+
+static inline uint64_t CVMX_NPEI_MEM_ACCESS_SUBIDX(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 12) && (offset <= 27)))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset >= 12) && (offset <= 27))))))
+ cvmx_warn("CVMX_NPEI_MEM_ACCESS_SUBIDX(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000000340ull + (offset&31)*16 - 16*12;
+}
+
+#define CVMX_NPEI_MSI_ENB0 CVMX_NPEI_MSI_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB0 not supported on this chip\n");
+#endif
+ return 0x0000000000003C50ull;
+}
+
+#define CVMX_NPEI_MSI_ENB1 CVMX_NPEI_MSI_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB1 not supported on this chip\n");
+#endif
+ return 0x0000000000003C60ull;
+}
+
+#define CVMX_NPEI_MSI_ENB2 CVMX_NPEI_MSI_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB2 not supported on this chip\n");
+#endif
+ return 0x0000000000003C70ull;
+}
+
+#define CVMX_NPEI_MSI_ENB3 CVMX_NPEI_MSI_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_ENB3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_ENB3 not supported on this chip\n");
+#endif
+ return 0x0000000000003C80ull;
+}
+
+#define CVMX_NPEI_MSI_RCV0 CVMX_NPEI_MSI_RCV0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV0 not supported on this chip\n");
+#endif
+ return 0x0000000000003C10ull;
+}
+
+#define CVMX_NPEI_MSI_RCV1 CVMX_NPEI_MSI_RCV1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV1 not supported on this chip\n");
+#endif
+ return 0x0000000000003C20ull;
+}
+
+#define CVMX_NPEI_MSI_RCV2 CVMX_NPEI_MSI_RCV2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV2 not supported on this chip\n");
+#endif
+ return 0x0000000000003C30ull;
+}
+
+#define CVMX_NPEI_MSI_RCV3 CVMX_NPEI_MSI_RCV3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RCV3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RCV3 not supported on this chip\n");
+#endif
+ return 0x0000000000003C40ull;
+}
+
+#define CVMX_NPEI_MSI_RD_MAP CVMX_NPEI_MSI_RD_MAP_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_RD_MAP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_RD_MAP not supported on this chip\n");
+#endif
+ return 0x0000000000003CA0ull;
+}
+
+#define CVMX_NPEI_MSI_W1C_ENB0 CVMX_NPEI_MSI_W1C_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB0 not supported on this chip\n");
+#endif
+ return 0x0000000000003CF0ull;
+}
+
+#define CVMX_NPEI_MSI_W1C_ENB1 CVMX_NPEI_MSI_W1C_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB1 not supported on this chip\n");
+#endif
+ return 0x0000000000003D00ull;
+}
+
+/*
+ * MSI interrupt-enable write-1-to-clear (W1C) and write-1-to-set (W1S)
+ * register addresses.  Each macro expands to an inline function returning
+ * the fixed CSR offset.  When CVMX_ENABLE_CSR_ADDRESS_CHECKING is set,
+ * using these on a chip other than CN52XX/CN56XX only emits a warning;
+ * the address is still returned.
+ */
+#define CVMX_NPEI_MSI_W1C_ENB2 CVMX_NPEI_MSI_W1C_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB2 not supported on this chip\n");
+#endif
+ return 0x0000000000003D10ull;
+}
+
+#define CVMX_NPEI_MSI_W1C_ENB3 CVMX_NPEI_MSI_W1C_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1C_ENB3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1C_ENB3 not supported on this chip\n");
+#endif
+ return 0x0000000000003D20ull;
+}
+
+#define CVMX_NPEI_MSI_W1S_ENB0 CVMX_NPEI_MSI_W1S_ENB0_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB0_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB0 not supported on this chip\n");
+#endif
+ return 0x0000000000003D30ull;
+}
+
+#define CVMX_NPEI_MSI_W1S_ENB1 CVMX_NPEI_MSI_W1S_ENB1_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB1 not supported on this chip\n");
+#endif
+ return 0x0000000000003D40ull;
+}
+
+#define CVMX_NPEI_MSI_W1S_ENB2 CVMX_NPEI_MSI_W1S_ENB2_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB2 not supported on this chip\n");
+#endif
+ return 0x0000000000003D50ull;
+}
+
+#define CVMX_NPEI_MSI_W1S_ENB3 CVMX_NPEI_MSI_W1S_ENB3_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_W1S_ENB3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_W1S_ENB3 not supported on this chip\n");
+#endif
+ return 0x0000000000003D60ull;
+}
+
+/*
+ * MSI write-map and PCIe MSI-receive window register addresses
+ * (CN52XX/CN56XX only).  As above, the model check is warn-only and
+ * active solely under CVMX_ENABLE_CSR_ADDRESS_CHECKING.
+ */
+#define CVMX_NPEI_MSI_WR_MAP CVMX_NPEI_MSI_WR_MAP_FUNC()
+static inline uint64_t CVMX_NPEI_MSI_WR_MAP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_MSI_WR_MAP not supported on this chip\n");
+#endif
+ return 0x0000000000003C90ull;
+}
+
+#define CVMX_NPEI_PCIE_CREDIT_CNT CVMX_NPEI_PCIE_CREDIT_CNT_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_CREDIT_CNT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_CREDIT_CNT not supported on this chip\n");
+#endif
+ return 0x0000000000003D70ull;
+}
+
+#define CVMX_NPEI_PCIE_MSI_RCV CVMX_NPEI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV not supported on this chip\n");
+#endif
+ return 0x0000000000003CB0ull;
+}
+
+/* Note: the B1/B2/B3 windows live at low offsets (0x650-0x670), far from
+ * the base CVMX_NPEI_PCIE_MSI_RCV register above; this matches the
+ * generated register map and is not a typo. */
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 CVMX_NPEI_PCIE_MSI_RCV_B1_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B1_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B1 not supported on this chip\n");
+#endif
+ return 0x0000000000000650ull;
+}
+
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 CVMX_NPEI_PCIE_MSI_RCV_B2_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B2_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B2 not supported on this chip\n");
+#endif
+ return 0x0000000000000660ull;
+}
+
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 CVMX_NPEI_PCIE_MSI_RCV_B3_FUNC()
+static inline uint64_t CVMX_NPEI_PCIE_MSI_RCV_B3_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PCIE_MSI_RCV_B3 not supported on this chip\n");
+#endif
+ return 0x0000000000000670ull;
+}
+
+/*
+ * Per-ring NPEI packet CSR accessors (rings 0-31 on CN52XX/CN56XX).
+ * Each returns base_address + (offset & 31) * 16.  Note the two layers
+ * of range handling: the optional address check only *warns* when
+ * offset > 31, while the (offset & 31) mask silently wraps out-of-range
+ * ring numbers onto a valid register — callers must pass 0-31.
+ */
+static inline uint64_t CVMX_NPEI_PKTX_CNTS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_CNTS(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000002400ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_BADDR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_BADDR(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000002800ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000002C00ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000003000ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_INSTR_HEADER(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_INSTR_HEADER(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000003400ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_IN_BP(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_IN_BP(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000003800ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_BADDR(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_BADDR(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000001400ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000001800ull + (offset&31)*16;
+}
+
+static inline uint64_t CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000001C00ull + (offset&31)*16;
+}
+
+/*
+ * Global (non-per-ring) NPEI packet-engine control and status register
+ * addresses: count-interrupt, output endian/ordering hints, input
+ * control, instruction sizing, interrupt levels and backpressure.
+ * All CN52XX/CN56XX-only; the model check is warn-only and compiled in
+ * only under CVMX_ENABLE_CSR_ADDRESS_CHECKING.
+ */
+#define CVMX_NPEI_PKT_CNT_INT CVMX_NPEI_PKT_CNT_INT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_CNT_INT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_CNT_INT not supported on this chip\n");
+#endif
+ return 0x0000000000001110ull;
+}
+
+#define CVMX_NPEI_PKT_CNT_INT_ENB CVMX_NPEI_PKT_CNT_INT_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_CNT_INT_ENB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_CNT_INT_ENB not supported on this chip\n");
+#endif
+ return 0x0000000000001130ull;
+}
+
+#define CVMX_NPEI_PKT_DATA_OUT_ES CVMX_NPEI_PKT_DATA_OUT_ES_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_ES_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_ES not supported on this chip\n");
+#endif
+ return 0x00000000000010B0ull;
+}
+
+#define CVMX_NPEI_PKT_DATA_OUT_NS CVMX_NPEI_PKT_DATA_OUT_NS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_NS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_NS not supported on this chip\n");
+#endif
+ return 0x00000000000010A0ull;
+}
+
+#define CVMX_NPEI_PKT_DATA_OUT_ROR CVMX_NPEI_PKT_DATA_OUT_ROR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DATA_OUT_ROR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DATA_OUT_ROR not supported on this chip\n");
+#endif
+ return 0x0000000000001090ull;
+}
+
+#define CVMX_NPEI_PKT_DPADDR CVMX_NPEI_PKT_DPADDR_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_DPADDR_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_DPADDR not supported on this chip\n");
+#endif
+ return 0x0000000000001080ull;
+}
+
+#define CVMX_NPEI_PKT_INPUT_CONTROL CVMX_NPEI_PKT_INPUT_CONTROL_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INPUT_CONTROL_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INPUT_CONTROL not supported on this chip\n");
+#endif
+ return 0x0000000000001150ull;
+}
+
+#define CVMX_NPEI_PKT_INSTR_ENB CVMX_NPEI_PKT_INSTR_ENB_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_ENB_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_ENB not supported on this chip\n");
+#endif
+ return 0x0000000000001000ull;
+}
+
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE CVMX_NPEI_PKT_INSTR_RD_SIZE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_RD_SIZE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_RD_SIZE not supported on this chip\n");
+#endif
+ return 0x0000000000001190ull;
+}
+
+#define CVMX_NPEI_PKT_INSTR_SIZE CVMX_NPEI_PKT_INSTR_SIZE_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INSTR_SIZE_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INSTR_SIZE not supported on this chip\n");
+#endif
+ return 0x0000000000001020ull;
+}
+
+#define CVMX_NPEI_PKT_INT_LEVELS CVMX_NPEI_PKT_INT_LEVELS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_INT_LEVELS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_INT_LEVELS not supported on this chip\n");
+#endif
+ return 0x0000000000001100ull;
+}
+
+#define CVMX_NPEI_PKT_IN_BP CVMX_NPEI_PKT_IN_BP_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_BP_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_BP not supported on this chip\n");
+#endif
+ return 0x00000000000006B0ull;
+}
+
+/*
+ * Packet-input side registers: per-ring DONE counts (rings 0-31, same
+ * warn-only range check and silent (offset & 31) wrap as the PKTX
+ * accessors above), plus the global instruction-count and PCIe-port
+ * mapping registers.  CN52XX/CN56XX only.
+ */
+static inline uint64_t CVMX_NPEI_PKT_IN_DONEX_CNTS(unsigned long offset)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 31))) ||
+ (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 31)))))
+ cvmx_warn("CVMX_NPEI_PKT_IN_DONEX_CNTS(%lu) is invalid on this chip\n", offset);
+#endif
+ return 0x0000000000002000ull + (offset&31)*16;
+}
+
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS CVMX_NPEI_PKT_IN_INSTR_COUNTS_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_INSTR_COUNTS_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_INSTR_COUNTS not supported on this chip\n");
+#endif
+ return 0x00000000000006A0ull;
+}
+
+#define CVMX_NPEI_PKT_IN_PCIE_PORT CVMX_NPEI_PKT_IN_PCIE_PORT_FUNC()
+static inline uint64_t CVMX_NPEI_PKT_IN_PCIE_PORT_FUNC(void)
+{
+#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
+ if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
+ cvmx_warn("CVMX_NPEI_PKT_IN_PCIE_PORT not supported on this chip\n");
+#endif
+ return 0x00000000000011A0ull;
+}
+
+#define